repo_name | path | license | content
---|---|---|---|
tpin3694/tpin3694.github.io | python/pandas_apply_operations_to_groups.ipynb | mit | # import modules
import pandas as pd
# Create dataframe
raw_data = {'regiment': ['Nighthawks', 'Nighthawks', 'Nighthawks', 'Nighthawks', 'Dragoons', 'Dragoons', 'Dragoons', 'Dragoons', 'Scouts', 'Scouts', 'Scouts', 'Scouts'],
'company': ['1st', '1st', '2nd', '2nd', '1st', '1st', '2nd', '2nd','1st', '1st', '2nd', '2nd'],
'name': ['Miller', 'Jacobson', 'Ali', 'Milner', 'Cooze', 'Jacon', 'Ryaner', 'Sone', 'Sloan', 'Piger', 'Riani', 'Ali'],
'preTestScore': [4, 24, 31, 2, 3, 4, 24, 31, 2, 3, 2, 3],
'postTestScore': [25, 94, 57, 62, 70, 25, 94, 57, 62, 70, 62, 70]}
df = pd.DataFrame(raw_data, columns = ['regiment', 'company', 'name', 'preTestScore', 'postTestScore'])
df
# Create a groupby variable that groups preTestScores by regiment
groupby_regiment = df['preTestScore'].groupby(df['regiment'])
groupby_regiment
"""
Explanation: Title: Apply Operations To Groups In Pandas
Slug: pandas_apply_operations_to_groups
Summary: Apply Operations To Groups In Pandas
Date: 2016-05-01 12:00
Category: Python
Tags: Data Wrangling
Authors: Chris Albon
Preliminaries
End of explanation
"""
list(df['preTestScore'].groupby(df['regiment']))
"""
Explanation: "This grouped variable is now a GroupBy object. It has not actually computed anything yet except for some intermediate data about the group key df['key1']. The idea is that this object has all of the information needed to then apply some operation to each of the groups." - Python for Data Analysis
View a grouping
Use list() to show what a grouping looks like
End of explanation
"""
df['preTestScore'].groupby(df['regiment']).describe()
"""
Explanation: Descriptive statistics by group
End of explanation
"""
groupby_regiment.mean()
"""
Explanation: Mean of each regiment's preTestScore
End of explanation
"""
df['preTestScore'].groupby([df['regiment'], df['company']]).mean()
"""
Explanation: Mean preTestScores grouped by regiment and company
End of explanation
"""
df['preTestScore'].groupby([df['regiment'], df['company']]).mean().unstack()
"""
Explanation: Mean preTestScores grouped by regiment and company without hierarchical indexing
End of explanation
"""
df.groupby(['regiment', 'company']).mean()
"""
Explanation: Group the entire dataframe by regiment and company
End of explanation
"""
df.groupby(['regiment', 'company']).size()
"""
Explanation: Number of observations in each regiment and company
End of explanation
"""
# Group the dataframe by regiment, and for each regiment,
for name, group in df.groupby('regiment'):
# print the name of the regiment
print(name)
# print the data of that regiment
print(group)
"""
Explanation: Iterate an operation over groups
End of explanation
"""
list(df.groupby(df.dtypes, axis=1))
"""
Explanation: Group by columns
Specifically in this case: group by the data types of the columns (i.e. axis=1) and then use list() to view what that grouping looks like
End of explanation
"""
df.groupby('regiment').mean().add_prefix('mean_')
"""
Explanation: In the dataframe "df", group by "regiment", take the mean values of the other variables for those groups, then display them with the prefix "mean_"
End of explanation
"""
def get_stats(group):
return {'min': group.min(), 'max': group.max(), 'count': group.count(), 'mean': group.mean()}
"""
Explanation: Create a function to get the stats of a group
End of explanation
"""
bins = [0, 25, 50, 75, 100]
group_names = ['Low', 'Okay', 'Good', 'Great']
df['categories'] = pd.cut(df['postTestScore'], bins, labels=group_names)
"""
Explanation: Create bins and bin up postTestScore by those bins
End of explanation
"""
df['postTestScore'].groupby(df['categories']).apply(get_stats).unstack()
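# Added aside (not part of the original recipe): pandas' built-in aggregation can
# produce the same per-bin statistics as the custom get_stats()/apply() pattern above.
df['postTestScore'].groupby(df['categories']).agg(['min', 'max', 'count', 'mean'])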
"""
Explanation: Apply the get_stats() function to each postTestScore bin
End of explanation
"""
|
ethen8181/machine-learning | python/algorithms/search_sort.ipynb | mit | from jupyterthemes import get_themes
from jupyterthemes.stylefx import set_nb_theme
themes = get_themes()
set_nb_theme(themes[1])
%load_ext watermark
%watermark -a 'Ethen' -d -t -v -p jupyterthemes
"""
Explanation: <h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Searching,-Hashing,-Sorting" data-toc-modified-id="Searching,-Hashing,-Sorting-1"><span class="toc-item-num">1 </span>Searching, Hashing, Sorting</a></span><ul class="toc-item"><li><span><a href="#Searching" data-toc-modified-id="Searching-1.1"><span class="toc-item-num">1.1 </span>Searching</a></span></li><li><span><a href="#Hashing" data-toc-modified-id="Hashing-1.2"><span class="toc-item-num">1.2 </span>Hashing</a></span></li><li><span><a href="#Sorting" data-toc-modified-id="Sorting-1.3"><span class="toc-item-num">1.3 </span>Sorting</a></span></li></ul></li></ul></div>
End of explanation
"""
def binary_search(testlist, query):
if not testlist:
return False
else:
mid_idx = len(testlist) // 2
mid = testlist[mid_idx]
if mid == query:
return True
elif query < mid:
return binary_search(testlist[:mid_idx], query)
else:
return binary_search(testlist[mid_idx + 1:], query)
testlist = [0, 1, 2, 8, 13, 17, 19, 32, 42]
query = 19
print(binary_search(testlist, query))
query = 3
print(binary_search(testlist, query))
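# Added sketch (not in the original notebook): an iterative, index-based variant of
# the same binary search. It avoids the list slicing used in the recursive version
# above, which copies half of the list on every call.
def binary_search_iterative(testlist, query):
    low, high = 0, len(testlist) - 1
    while low <= high:
        mid_idx = (low + high) // 2
        if testlist[mid_idx] == query:
            return True
        elif query < testlist[mid_idx]:
            high = mid_idx - 1
        else:
            low = mid_idx + 1
    return False

print(binary_search_iterative(testlist, 19))  # True
print(binary_search_iterative(testlist, 3))   # False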
"""
Explanation: Searching, Hashing, Sorting
Following the online book, Problem Solving with Algorithms and Data Structures. Chapter 6 introduces methods for searching and sorting numbers.
Searching
We can take advantage of an ordered list by doing a binary search. We start by searching in the middle; if that is not the item we're searching for, we can use the ordered nature of the list to eliminate half of the remaining items.
End of explanation
"""
class HashTable:
"""
a.k.a python's dictionary
the initial size of the table has been chosen to
be 11, although this number is arbitrary, it's important
for it to be a prime number so that collision resolution
will be efficient; this implementation does not handle
resizing the hashtable when it runs out of the original size
"""
def __init__(self):
# slot will hold the key and data will hold the value
self.size = 11
self.slot = [None] * self.size
self.data = [None] * self.size
def _put(self, key, value):
hash_value = self._hash(key)
if self.slot[hash_value] == None:
self.slot[hash_value] = key
self.data[hash_value] = value
elif self.slot[hash_value] == key:
# replace the original key value
self.data[hash_value] = value
else:
# rehash to get the next location possible
# if a collision is to occurr
next_slot = self._rehash(hash_value)
slot_value = self.slot[next_slot]
while slot_value != None and slot_value != key:
next_slot = self._rehash(next_slot)
slot_value = self.slot[next_slot]
if self.slot[next_slot] == None:
self.slot[next_slot] = key
self.data[next_slot] = value
else:
self.data[next_slot] = value
def _get(self, key):
data = None
stop = False
found = False
start_slot = self._hash(key)
next_slot = start_slot
while self.slot[next_slot] != None and not found and not stop:
if self.slot[next_slot] == key:
data = self.data[next_slot]
found = True
else:
# if we rehash to the starting value
# then it means the data is not here
next_slot = self._rehash(next_slot)
if next_slot == start_slot:
stop = True
return data
def _hash(self, key):
return key % self.size
def _rehash(self, oldhash):
"""
a simple plus 1 rehash, where we add 1 to
the original value and hash it again to
see if the slot it empty (None)
"""
return (oldhash + 1) % self.size
def __getitem__(self, key):
# allow access using``[]`` syntax
return self._get(key)
def __setitem__(self, key, value):
self._put(key, value)
H = HashTable()
H[54] = 'cat'
H[26] = 'dog'
H[93] = 'lion'
H[17] = 'tiger'
H[77] = 'bird'
H[44] = 'goat'
H[55] = 'pig'
print(H.slot)
print(H.data)
print(H[55])
print(H[20])
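# Added sketch (not in the original notebook): the hash -> index mapping used by the
# HashTable above, shown with Python's built-in hash() and the modulo operator.
array_size = 11
for key in ('cat', 'dog', 54, 26):
    print(key, '->', hash(key) % array_size)  # index = hash_function(key) % array_size
# load factor = stored elements / array size; a resize is typically triggered once
# this ratio exceeds some threshold (e.g. around 0.7)
print('load factor with 7 stored items:', 7 / array_size)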
"""
Explanation: Keep in mind that this approach requires sorting the list, which may not be ideal if we're simply going to search for one number in a very large list (since we have to sort the list first, which is not a cheap operation).
Hashing
Quick notes:
A dictionary or map that lets us store key-value pairs is typically implemented using a hash table.
An element that we wish to add is converted into an integer by using a hash function. hash = hash_function(key)
The resulting hash is independent of the underlying array size, and it is then reduced to an index by using the modulo operator. index = hash % array_size
Python has a built-in hash function that can calculate hash values for arbitrarily large objects in a fast manner. A hash function that is used for dictionaries or maps should be deterministic (so we can look up the values afterwards), fast to compute (else the overhead of hashing would offset the benefit it brings for fast lookup), and uniformly distributed (which is related to avoiding hash collisions).
A hash collision happens when two different inputs produce the same hash value. To avoid this, a well-implemented hash table should be able to resolve hash collisions; common techniques include linear probing and separate chaining. Also, when our hash table is almost saturated (the number of elements is close to the array size we've defined), i.e. the load factor is larger than some threshold, it should be able to dynamically resize the hash table to maintain our dictionary or map's performance.
Real Python: Build a Hash Table in Python With TDD has a much more in-depth introduction to this topic.
End of explanation
"""
def merge_sort(alist):
if len(alist) > 1:
mid = len(alist) // 2
left_half = alist[:mid]
right_half = alist[mid:]
merge_sort(left_half)
merge_sort(right_half)
# loop through the left and right half,
# compare the value and fill them back
# to the original list
i, j, k = 0, 0, 0
while i < len(left_half) and j < len(right_half):
if left_half[i] < right_half[j]:
alist[k] = left_half[i]
i += 1
else:
alist[k] = right_half[j]
j += 1
k += 1
# after filling in the sorted value,
# there will be left-over values on
# either the left or right half, simply
# append all the left-over values back
while i < len(left_half):
alist[k] = left_half[i]
i += 1
k += 1
while j < len(right_half):
alist[k] = right_half[j]
j += 1
k += 1
return alist
alist = [54, 26, 93, 17, 77]
merge_sort(alist)
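# Added check (not in the original notebook): merge_sort should agree with Python's
# built-in sorted() on random data.
import random
data = [random.randint(0, 100) for _ in range(20)]
print(merge_sort(list(data)) == sorted(data))  # expect True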
"""
Explanation: Sorting
Merge Sort
End of explanation
"""
def quick_sort(alist):
_sort(alist, 0, len(alist) - 1)
def _sort(alist, first, last):
if first < last:
split_point = _partition(alist, first, last)
_sort(alist, first, split_point - 1)
_sort(alist, split_point + 1, last)
def _partition(alist, first, last):
right = last
left = first + 1
pivot_value = alist[first]
# find the split point of the pivot and move all other
# items to the appropriate side of the list (i.e. if
# the item is greater than pivot, then it should be
# on the right hand side and vice versa)
done = False
while not done:
while left <= right and alist[left] <= pivot_value:
left += 1
while alist[right] >= pivot_value and right >= left:
right -= 1
if right <= left:
done = True
else:
alist[right], alist[left] = alist[left], alist[right]
# swap pivot value to split point
alist[first], alist[right] = alist[right], alist[first]
return right
# list sorted in place
alist = [54, 26, 93, 17, 77, 50]
quick_sort(alist)
alist
"""
Explanation: Quick Sort
End of explanation
"""
|
hongguangguo/shogun | doc/ipython-notebooks/logdet/logdet.ipynb | gpl-3.0 | %matplotlib inline
from scipy.sparse import eye
from scipy.io import mmread
from matplotlib import pyplot as plt
matFile='../../../data/logdet/apache2.mtx.gz'
M = mmread(matFile)
rows = M.shape[0]
cols = M.shape[1]
A = M + eye(rows, cols) * 10000.0
plt.title("A")
plt.spy(A, precision = 1e-2, marker = '.', markersize = 0.01)
plt.show()
"""
Explanation: Implement estimators of large-scale sparse Gaussian densities
by Soumyajit De (email: [email protected], [email protected]. Github: <a href="https://github.com/lambday">lambday</a>)<br/> Many many thanks to my mentor Heiko Strathmann, Sergey Lisitsyn, Sören Sonnenburg, Viktor Gal
This notebook illustrates large-scale sparse Gaussian density likelihood estimation. It first introduces the reader to the mathematical background and then shows how one can do the estimation with Shogun on a number of real-world data sets.
<h2>Theoretical introduction</h2>
<p><i>Multivariate Gaussian distributions</i>, i.e. some random vector $\mathbf{x}\in\mathbb{R}^n$ having probability density function
$$p(\mathbf{x}|\boldsymbol\mu, \boldsymbol\Sigma)=(2\pi)^{-n/2}\text{det}(\boldsymbol\Sigma)^{-1/2} \exp\left(-\frac{1}{2}(\mathbf{x}-\boldsymbol\mu)^{T}\boldsymbol\Sigma^{-1}(\mathbf{x}-\boldsymbol\mu)\right)$$
$\boldsymbol\mu$ being the mean vector and $\boldsymbol\Sigma$ being the covariance matrix, arise on numerous occasions involving large datasets. Computing the <i>log-likelihood</i> in these cases requires computation of the log-determinant of the covariance matrix
$$\mathcal{L}(\mathbf{x}|\boldsymbol\mu,\boldsymbol\Sigma)=-\frac{n}{2}\log(2\pi)-\frac{1}{2}\log(\text{det}(\boldsymbol\Sigma))-\frac{1}{2}(\mathbf{x}-\boldsymbol\mu)^{T}\boldsymbol\Sigma^{-1}(\mathbf{x}-\boldsymbol\mu)$$
The covariance matrix and its inverse are symmetric positive definite (spd) and are often sparse, e.g. due to conditional independence properties of Gaussian Markov Random Fields (GMRF). Therefore they can be stored efficiently even for large dimension $n$.</p>
<p>The usual technique for computing the log-determinant term in the likelihood expression relies on <i><a href="http://en.wikipedia.org/wiki/Cholesky_factorization">Cholesky factorization</a></i> of the matrix, i.e. $\boldsymbol\Sigma=\mathbf{LL}^{T}$, ($\mathbf{L}$ is the lower triangular Cholesky factor) and then using the diagonal entries of the factor to compute $\log(\text{det}(\boldsymbol\Sigma))=2\sum_{i=1}^{n}\log(\mathbf{L}_{ii})$. However, for sparse matrices, as covariance matrices usually are, the Cholesky factors often suffer from <i>fill-in</i> phenomena - they turn out to be not so sparse themselves. Therefore, for large dimensions this technique becomes infeasible because of a massive memory requirement for storing all these irrelevant non-diagonal co-efficients of the factor. While ordering techniques have been developed to permute the rows and columns beforehand in order to reduce fill-in, e.g. <i><a href="http://en.wikipedia.org/wiki/Minimum_degree_algorithm">approximate minimum degree</a></i> (AMD) reordering, these techniques depend largely on the sparsity pattern and therefore not guaranteed to give better result.</p>
<p>Recent research shows that using a number of techniques from complex analysis, numerical linear algebra and greedy graph coloring, we can, however, approximate the log-determinant up to an arbitrary precision [<a href="http://link.springer.com/article/10.1007%2Fs11222-012-9368-y">Aune et. al., 2012</a>]. The main trick lies within the observation that we can write $\log(\text{det}(\boldsymbol\Sigma))$ as $\text{trace}(\log(\boldsymbol\Sigma))$, where $\log(\boldsymbol\Sigma)$ is the matrix-logarithm. Computing the log-determinant then requires extracting the trace of the matrix-logarithm as
$$\text{trace}(\log(\boldsymbol\Sigma))=\sum_{j=1}^{n}\mathbf{e}^{T}_{j}\log(\boldsymbol\Sigma)\mathbf{e}_{j}$$
where each $\mathbf{e}_{j}$ is a unit basis vector having a 1 in its $j^{\text{th}}$ position while rest are zeros and we assume that we can compute $\log(\boldsymbol\Sigma)\mathbf{e}_{j}$ (explained later). For large dimension $n$, this approach is still costly, so one needs to rely on sampling the trace. For example, using stochastic vectors we can obtain a <i><a href="http://en.wikipedia.org/wiki/Monte_Carlo_method">Monte Carlo estimator</a></i> for the trace -
$$\text{trace}(\log(\boldsymbol\Sigma))=\mathbb{E}_{\mathbf{v}}(\mathbf{v}^{T}\log(\boldsymbol\Sigma)\mathbf{v})\approx \sum_{j=1}^{k}\mathbf{s}^{T}_{j}\log(\boldsymbol\Sigma)\mathbf{s}_{j}$$
where the source vectors ($\mathbf{s}_{j}$) have zero mean and unit variance (e.g. $\mathbf{s}_{j}\sim\mathcal{N}(\mathbf{0}, \mathbf{I}), \forall j\in[1\cdots k]$). But since this is a Monte Carlo method, we need many many samples to get sufficiently accurate approximation. However, by a method suggested in Aune et. al., we can reduce the number of samples required drastically by using <i>probing-vectors</i> that are obtained from <a href="http://en.wikipedia.org/wiki/Graph_coloring">coloring of the adjacency graph</a> represented by the power of the sparse-matrix, $\boldsymbol\Sigma^{p}$, i.e. we can obtain -
$$\mathbb{E}_{\mathbf{v}}(\mathbf{v}^{T}\log(\boldsymbol\Sigma)\mathbf{v})\approx \sum_{j=1}^{m}\mathbf{w}^{T}_{j}\log(\boldsymbol\Sigma)\mathbf{w}_{j}$$
with $m\ll n$, where $m$ is the number of colors used in the graph coloring. For a particular color $j$, the probing vector $\mathbb{w}_{j}$ is obtained by filling with $+1$ or $-1$ uniformly randomly for entries corresponding to nodes of the graph colored with $j$, keeping the rest of the entries as zeros. Since the matrix is sparse, the number of colors used is usually very small compared to the dimension $n$, promising the advantage of this approach.</p>
<p>There are two main issues in this technique. First, computing $\boldsymbol\Sigma^{p}$ is computationally costly, but experiments show that directly applying a <i>d-distance</i> coloring algorithm on the sparse matrix itself also results in a pretty good approximation. Second, computing the exact matrix-logarithm is often infeasible because it is not guaranteed to be sparse. Aune et. al. suggested that we can rely on rational approximation of the matrix-logarithm times vector using an approach described in <a href="http://eprints.ma.man.ac.uk/1136/01/covered/MIMS_ep2007_103.pdf">Hale et. al [2008]</a>, i.e. writing $\log(\boldsymbol\Sigma)\mathbf{w}_{j}$ in our desired expression using <i><a href="http://en.wikipedia.org/wiki/Cauchy's_integral_formula">Cauchy's integral formula</a></i> as -
$$log(\boldsymbol\Sigma)\mathbf{w}_{j}=\frac{1}{2\pi i}\oint_{\Gamma}log(z)(z\mathbf{I}-\boldsymbol\Sigma)^{-1}\mathbf{w}_{j}dz\approx \frac{-8K(\lambda_{m}\lambda_{M})^{\frac{1}{4}}}{k\pi N} \boldsymbol\Sigma\Im\left(-\sum_{l=1}^{N}\alpha_{l}(\boldsymbol\Sigma-\sigma_{l}\mathbf{I})^{-1}\mathbf{w}_{j}\right)$$
$K$, $k \in \mathbb{R}$, $\alpha_{l}$, $\sigma_{l} \in \mathbb{C}$ are coming from <i><a href="http://en.wikipedia.org/wiki/Jacobi_elliptic_functions">Jacobi elliptic functions</a></i>, $\lambda_{m}$ and $\lambda_{M}$ are the minimum/maximum eigenvalues of $\boldsymbol\Sigma$ (they have to be real-positive), respectively, $N$ is the number of contour points in the quadrature rule of the above integral and $\Im(\mathbf{x})$ represents the imaginary part of $\mathbf{x}\in\mathbb{C}^{n}$.</p>
<p>The problem then finally boils down to solving the shifted family of linear systems $(\boldsymbol\Sigma-\sigma_{l}\mathbf{I})\mathbb{x}_{j}=\mathbb{w}_{j}$. Since $\boldsymbol\Sigma$ is sparse, matrix-vector products are not very costly and therefore these systems can be solved with a low memory-requirement using <i>Krylov subspace iterative solvers</i> like <i><a href="http://en.wikipedia.org/wiki/Conjugate_gradient_method">Conjugate Gradient</a></i> (CG). Since the shifted matrices have complex entries along their diagonal, the appropriate method to choose is <i>Conjugate Orthogonal Conjugate Gradient</i> (COCG) [<a href="http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=106415&tag=1">H.A. van der Vorst et. al., 1990.</a>]. Alternatively, these systems can be solved at once using <i>CG-M</i> [<a href="http://arxiv.org/abs/hep-lat/9612014">Jegerlehner, 1996.</a>] solver which solves for $(\mathbf{A}+\sigma\mathbf{I})\mathbf{x}=\mathbf{b}$ for all values of $\sigma$ using as many matrix-vector products in the CG-iterations as required to solve for one single shifted system. This algorithm shows reliable convergence behavior for systems with reasonable condition number.</p>
<p>One interesting property of this approach is that once the graph coloring information and shifts/weights are known, all the computation components - solving linear systems, computing final vector-vector product - are independently computable. Therefore, computation can be speeded up using parallel computation of these. To use this, a computation framework for Shogun is developed and the whole log-det computation works on top of it.</p>
<h2>An example of using this approach in Shogun</h2>
<p>We demonstrate the usage of this technique to estimate log-determinant of a real-valued spd sparse matrix with dimension $715,176\times 715,176$ with $4,817,870$ non-zero entries, <a href="http://www.cise.ufl.edu/research/sparse/matrices/GHS_psdef/apache2.html">apache2</a>, which is obtained from the <a href="http://www.cise.ufl.edu/research/sparse/matrices/">The University of Florida Sparse Matrix Collection</a>. Cholesky factorization with AMD for this sparse-matrix gives rise to factors with $353,843,716$ non-zero entries (from source). We use CG-M solver to solve the shifted systems which is then used with <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CSerialComputationEngine.html">SerialComputationEngine</a> to perform the computation jobs sequentially on a single core, that works even on a normal Desktop machine. Since the original matrix is badly conditioned, here we added a ridge along its diagonal to reduce the condition number so that the CG-M solver converges within reasonable time. Please note that for high condition number, the number of iteration has to be set very high.
End of explanation
"""
from modshogun import RealSparseMatrixOperator, LanczosEigenSolver
op = RealSparseMatrixOperator(A.tocsc())
# Lanczos iterative Eigensolver to compute the min/max Eigenvalues which is required to compute the shifts
eigen_solver = LanczosEigenSolver(op)
# we set the iteration limit high to compute the eigenvalues more accurately, default iteration limit is 1000
eigen_solver.set_max_iteration_limit(2000)
# computing the eigenvalues
eigen_solver.compute()
print 'Minimum Eigenvalue:', eigen_solver.get_min_eigenvalue()
print 'Maximum Eigenvalue:', eigen_solver.get_max_eigenvalue()
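# Added aside (not part of the original notebook): a dense NumPy/SciPy sanity check
# of two identities from the theoretical introduction above, on a tiny SPD matrix:
# log(det(Sigma)) = trace(log(Sigma)), and the Monte Carlo trace estimate with
# Rademacher (+1/-1) source vectors. Shogun is used below for the real sparse case.
import numpy as np
from scipy.linalg import logm
rng = np.random.RandomState(0)
B_small = rng.randn(5, 5)
Sigma = B_small.dot(B_small.T) + 5.0 * np.eye(5)  # a small spd matrix
sign, logdet = np.linalg.slogdet(Sigma)
print 'log(det(Sigma)):', logdet
print 'trace(logm(Sigma)):', np.trace(logm(Sigma)).real
log_Sigma = logm(Sigma)
estimates = [s.dot(log_Sigma).dot(s) for s in np.sign(rng.randn(2000, 5))]
print 'sampled trace estimate:', np.mean(estimates).real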
"""
Explanation: First, to keep the notion of Krylov subspace, we view the matrix as a linear operator that applies on a vector, resulting in a new vector. We use <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CSparseMatrixOperator.html">RealSparseMatrixOperator</a> that is suitable for this example. All the solvers work with <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CLinearOperator.html">LinearOperator</a> type objects. For computing the eigenvalues, we use the <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CLanczosEigenSolver.html">LanczosEigenSolver</a> class. Although computation of the Eigenvalues is done internally within the log-determinant estimator itself (see below), here we explicitly precompute them.
End of explanation
"""
# We can specify the power of the sparse-matrix that is to be used for coloring, default values will apply a
# 2-distance greedy graph coloring algorithm on the sparse-matrix itself. Matrix-power, if specified, is computed in O(lg p)
from modshogun import ProbingSampler
trace_sampler = ProbingSampler(op)
# apply the graph coloring algorithm and generate the number of colors, i.e. number of trace samples
trace_sampler.precompute()
print 'Number of colors used:', trace_sampler.get_num_samples()
"""
Explanation: Next, we use <a href="http://www.shogun-toolbox.org/doc/en/latest/ProbingSampler_8h_source.html">ProbingSampler</a> class which uses an external library <a href="http://www.cscapes.org/coloringpage/">ColPack</a>. Again, the number of colors used is precomputed for demonstration purpose, although computed internally inside the log-determinant estimator.
End of explanation
"""
from modshogun import SerialComputationEngine, CGMShiftedFamilySolver, LogRationalApproximationCGM
engine = SerialComputationEngine()
cgm = CGMShiftedFamilySolver()
# setting the iteration limit (set this to higher value for higher condition number)
cgm.set_iteration_limit(100)
# accuracy determines the number of contour points in the rational approximation (i.e. number of shifts in the systems)
accuracy = 1E-15
# we create a operator-log-function using the sparse matrix operator that uses CG-M to solve the shifted systems
op_func = LogRationalApproximationCGM(op, engine, eigen_solver, cgm, accuracy)
op_func.precompute()
print 'Number of shifts:', op_func.get_num_shifts()
"""
Explanation: <p>This corresponds to averaging over 13 source vectors rather than one (but has much lower variance than using 13 Gaussian source vectors). A comparison between the convergence behavior of using the probing sampler and the Gaussian sampler is presented later.</p>
<p>Then we define <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CLogRationalApproximationCGM.html">LogRationalApproximationCGM</a> operator function class, which internally uses the Eigensolver to compute the Eigenvalues, uses <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CJacobiEllipticFunctions.html">JacobiEllipticFunctions</a> to compute the complex shifts, weights and the constant multiplier in the rational approximation expression, takes the probing vector generated by the trace sampler and submits a computation job to the engine which then uses CG-M solver (<a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CCGMShiftedFamilySolver.html">CGMShiftedFamilySolver</a>) to solve the shifted systems. Precompute is not necessary here too.</p>
End of explanation
"""
import numpy as np
from modshogun import LogDetEstimator
# number of log-det samples (use a higher number to get better estimates)
# (this is 5 times number of colors estimate in practice, so usually 1 probing estimate is enough)
num_samples = 5
log_det_estimator = LogDetEstimator(trace_sampler, op_func, engine)
estimates = log_det_estimator.sample(num_samples)
estimated_logdet = np.mean(estimates)
print 'Estimated log(det(A)):', estimated_logdet
"""
Explanation: Finally, we use the <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CLogDetEstimator.html">LogDetEstimator</a> class to sample the log-determinant of the matrix.
End of explanation
"""
# the following method requires massive amount of memory, for demonstration purpose
# the following code is commented out and direct value obtained from running it once is used
# from modshogun import Statistics
# actual_logdet = Statistics.log_det(A)
actual_logdet = 7120357.73878
print 'Actual log(det(A)):', actual_logdet
plt.hist(estimates)
plt.plot([actual_logdet, actual_logdet], [0,len(estimates)], linewidth=3)
plt.show()
"""
Explanation: To verify the accuracy of the estimate, we compute exact log-determinant of A using Cholesky factorization using <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CStatistics.html#a9931a4ea72310b239efdc05503442525">Statistics::log_det</a> method.
End of explanation
"""
from scipy.sparse import csc_matrix
from scipy.sparse import identity
m = mmread('../../../data/logdet/west0479.mtx')
# computing a spd with added ridge
B = csc_matrix(m.transpose() * m + identity(m.shape[0]) * 1000.0)
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(1,2,1)
ax.set_title('B')
ax.spy(B, precision = 1e-5, marker = '.', markersize = 2.0)
ax = fig.add_subplot(1,2,2)
ax.set_title('lower Cholesky factor')
dense_matrix = B.todense()
L = np.linalg.cholesky(dense_matrix)
ax.spy(csc_matrix(L), precision = 1e-5, marker = '.', markersize = 2.0)
plt.show()
op = RealSparseMatrixOperator(B)
eigen_solver = LanczosEigenSolver(op)
# computing log-det estimates using probing sampler
probing_sampler = ProbingSampler(op)
cgm.set_iteration_limit(500)
op_func = LogRationalApproximationCGM(op, engine, eigen_solver, cgm, 1E-5)
log_det_estimator = LogDetEstimator(probing_sampler, op_func, engine)
num_probing_estimates = 100
probing_estimates = log_det_estimator.sample(num_probing_estimates)
# computing log-det estimates using Gaussian sampler
from modshogun import NormalSampler, Statistics
num_colors = probing_sampler.get_num_samples()
normal_sampler = NormalSampler(op.get_dimension())
log_det_estimator = LogDetEstimator(normal_sampler, op_func, engine)
num_normal_estimates = num_probing_estimates * num_colors
normal_estimates = log_det_estimator.sample(num_normal_estimates)
# average in groups of n_effective_samples
effective_estimates_normal = np.zeros(num_probing_estimates)
for i in range(num_probing_estimates):
idx = i * num_colors
effective_estimates_normal[i] = np.mean(normal_estimates[idx:(idx + num_colors)])
actual_logdet = Statistics.log_det(B)
print 'Actual log(det(B)):', actual_logdet
print 'Estimated log(det(B)) using probing sampler:', np.mean(probing_estimates)
print 'Estimated log(det(B)) using Gaussian sampler:', np.mean(effective_estimates_normal)
print 'Variance using probing sampler:', np.var(probing_estimates)
print 'Variance using Gaussian sampler:', np.var(effective_estimates_normal)
fig = plt.figure(figsize=(15, 4))
ax = fig.add_subplot(1,3,1)
ax.set_title('Probing sampler')
ax.plot(np.cumsum(probing_estimates)/(np.arange(len(probing_estimates))+1))
ax.plot([0,len(probing_estimates)], [actual_logdet, actual_logdet])
ax.legend(["Probing", "True"])
ax = fig.add_subplot(1,3,2)
ax.set_title('Gaussian sampler')
ax.plot(np.cumsum(effective_estimates_normal)/(np.arange(len(effective_estimates_normal))+1))
ax.plot([0,len(probing_estimates)], [actual_logdet, actual_logdet])
ax.legend(["Gaussian", "True"])
ax = fig.add_subplot(1,3,3)
ax.hist(probing_estimates)
ax.hist(effective_estimates_normal)
ax.plot([actual_logdet, actual_logdet], [0,len(probing_estimates)], linewidth=3)
plt.show()
"""
Explanation: <h2>Statistics</h2>
We use a smaller sparse-matrix, <a href="http://www.cise.ufl.edu/research/sparse/matrices/HB/west0479.html">'west0479'</a> in this section to demonstrate the benefits of using probing vectors over standard Gaussian vectors to sample the trace of matrix-logarithm. In the following we can easily observe the fill-in phenomena described earlier. Again, a ridge has been added to reduce the runtime for demonstration purpose.
End of explanation
"""
from scipy.io import loadmat
def get_Q_y_A(kappa):
# read the ozone data and create the matrix Q
ozone = loadmat('../../../data/logdet/ozone_data.mat')
GiCG = ozone["GiCG"]
G = ozone["G"]
C0 = ozone["C0"]
kappa = 13.1
Q = GiCG + 2 * (kappa ** 2) * G + (kappa ** 4) * C0
# also, added a ridge here
Q = Q + eye(Q.shape[0], Q.shape[1]) * 10000.0
plt.spy(Q, precision = 1e-5, marker = '.', markersize = 1.0)
plt.show()
# read y and A
y = ozone["y_ozone"]
A = ozone["A"]
return Q, y, A
def log_det(A):
op = RealSparseMatrixOperator(A)
engine = SerialComputationEngine()
eigen_solver = LanczosEigenSolver(op)
probing_sampler = ProbingSampler(op)
cgm = CGMShiftedFamilySolver()
cgm.set_iteration_limit(100)
op_func = LogRationalApproximationCGM(op, engine, eigen_solver, cgm, 1E-5)
log_det_estimator = LogDetEstimator(probing_sampler, op_func, engine)
num_estimates = 1
return np.mean(log_det_estimator.sample(num_estimates))
def log_likelihood(tau, kappa):
Q, y, A = get_Q_y_A(kappa)
n = len(y);
AtA = A.T.dot(A)
M = Q + tau * AtA;
# Computing log-determinants")
logdet1 = log_det(Q)
logdet2 = log_det(M)
first = 0.5 * logdet1 + 0.5 * n * np.log(tau) - 0.5 * logdet2
# computing the rest of the likelihood
second_a = -0.5 * tau * (y.T.dot(y))
second_b = np.array(A.T.dot(y))
from scipy.sparse.linalg import spsolve
second_b = spsolve(M, second_b)
second_b = A.dot(second_b)
second_b = y.T.dot(second_b)
second_b = 0.5 * (tau ** 2) * second_b
log_det_part = first
quadratic_part = second_a + second_b
const_part = -0.5 * n * np.log(2 * np.pi)
log_marignal_lik = const_part + log_det_part + quadratic_part
return log_marignal_lik
L = log_likelihood(1.0, 15.0)
print 'Log-likelihood estimate:', L
"""
Explanation: <h2>A motivational example - likelihood of the Ozone dataset</h2>
<p>In <a href="http://arxiv.org/abs/1306.4032">Girolami et. al. (2013)</a>, an interesting scenario is discussed where the log-likelihood of a model involving a large spatial dataset is considered. The data, collected by a satellite, consists of $N=173,405$ ozone measurements around the globe. The data is modelled in a three-stage hierarchical way -
$$y_{i}|\mathbf{x},\kappa,\tau\sim\mathcal{N}(\mathbf{Ax},\tau^{−1}\mathbf{I})$$
$$\mathbf{x}|\kappa\sim\mathcal{N}(\mathbf{0}, \mathbf{Q}(\kappa))$$
$$\kappa\sim\log_{2}\mathcal{N}(0, 100), \tau\sim\log_{2}\mathcal{N}(0, 100)$$
Where the precision matrix, $\mathbf{Q}$, of a Matern SPDE model, defined on a fixed triangulation of the globe, is sparse and the parameter $\kappa$ controls for the range at which correlations in the field are effectively zero (see Girolami et. al. for details). The log-likelihood estimate of the posterior using this model is
$$2\mathcal{L}=2\log \pi(\mathbf{y}|\kappa,\tau)=C+\log(\text{det}(\mathbf{Q}(\kappa)))+N\log(\tau)−\log(\text{det}(\mathbf{Q}(\kappa)+\tau \mathbf{A}^{T}\mathbf{A}))− \tau\mathbf{y}^{T}\mathbf{y}+\tau^{2}\mathbf{y}^{T}\mathbf{A}(\mathbf{Q}(\kappa)+\tau\mathbf{A}^{T}\mathbf{A})^{−1}\mathbf{A}^{T}\mathbf{y}$$
In the expression, we have two terms involving the log-determinant of large sparse matrices. The rational approximation approach described in the previous section can readily be applied to estimate the log-likelihood. The following computation shows the usage of Shogun's log-determinant estimator for estimating this likelihood (code has been adapted from an open source library, <a href="https://github.com/karlnapf/ozone-roulette.git">ozone-roulette</a>, written by Heiko Strathmann, one of the authors of the original paper).
<b>Please note that we again added a ridge along the diagonal for faster execution of this example. Since the original matrix is badly conditioned, one needs to set the iteration limits very high for both the Eigen solver and the linear solver in the absence of preconditioning.</b>
End of explanation
"""
from modshogun import RealSparseMatrixOperator, ComplexDenseMatrixOperator
dim = 5
np.random.seed(10)
# create a random valued sparse matrix linear operator
A = csc_matrix(np.random.randn(dim, dim))
op = RealSparseMatrixOperator(A)
# creating a random vector
np.random.seed(1)
b = np.array(np.random.randn(dim))
v = op.apply(b)
print 'A.apply(b)=',v
# create a dense matrix linear operator
B = np.array(np.random.randn(dim, dim)).astype(complex)
op = ComplexDenseMatrixOperator(B)
print 'Dimension:', op.get_dimension()
"""
Explanation: <h2>Useful components</h2>
<p>As a part of the implementation of the log-determinant estimator, a number of classes have been developed, which may come in useful on several other occasions as well.
<h3>1. <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CLinearOperator.html">Linear Operators</a></h3>
All the linear solvers and Eigen solvers work with linear operators. Both real valued and complex valued operators are supported for dense/sparse matrix linear operators.
End of explanation
"""
from scipy.sparse import csc_matrix
from scipy.sparse import identity
from modshogun import ConjugateGradientSolver
# creating a random spd matrix
dim = 5
np.random.seed(10)
m = csc_matrix(np.random.randn(dim, dim))
a = m.transpose() * m + csc_matrix(np.identity(dim))
Q = RealSparseMatrixOperator(a)
# creating a random vector
y = np.array(np.random.randn(dim))
# solve the system Qx=y
# the argument is set as True to gather convergence statistics (default is False)
cg = ConjugateGradientSolver(True)
cg.set_iteration_limit(20)
x = cg.solve(Q,y)
print 'x:',x
# verifying the result
print 'y:', y
print 'Qx:', Q.apply(x)
residuals = cg.get_residuals()
plt.plot(residuals)
plt.show()
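# Added aside (not part of the original notebook): SciPy's conjugate gradient solver
# can serve as a quick cross-check of the result above on this small system
# (`a` and `y` are the matrix and vector created in the cell above).
from scipy.sparse.linalg import cg as scipy_cg
x_scipy, info = scipy_cg(a, y)
print 'scipy cg converged:', info == 0
print 'x (scipy):', x_scipy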
"""
Explanation: <h3>2. <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CLinearSolver.html">Linear Solvers</a></h3>
<p> Conjugate Gradient based iterative solvers, that construct the Krylov subspace in their iteration by computing matrix-vector products are most useful for solving sparse linear systems. Here is an overview of CG based solvers that are currently available in Shogun.</p>
<h4> <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CConjugateGradientSolver.html">Conjugate Gradient Solver</a></h4>
This solver solves for system $\mathbf{Qx}=\mathbf{y}$, where $\mathbf{Q}$ is real-valued spd linear operator (e.g. dense/sparse matrix operator), and $\mathbf{y}$ is real vector.
End of explanation
"""
from modshogun import ComplexSparseMatrixOperator
from modshogun import ConjugateOrthogonalCGSolver
# creating a random spd matrix
dim = 5
np.random.seed(10)
m = csc_matrix(np.random.randn(dim, dim))
a = m.transpose() * m + csc_matrix(np.identity(dim))
a = a.astype(complex)
# adding a complex entry along the diagonal
for i in range(0, dim):
a[i,i] += complex(np.random.randn(), np.random.randn())
Q = ComplexSparseMatrixOperator(a)
z = np.array(np.random.randn(dim))
# solve for the system Qx=z
cocg = ConjugateOrthogonalCGSolver(True)
cocg.set_iteration_limit(20)
x = cocg.solve(Q, z)
print 'x:',x
# verifying the result
print 'z:',z
print 'Qx:',np.real(Q.apply(x))
residuals = cocg.get_residuals()
plt.plot(residuals)
plt.show()
"""
Explanation: <h4><a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CConjugateOrthogonalCGSolver.html">Conjugate Orthogonal CG Solver</a></h4>
Solves for systems $\mathbf{Qx}=\mathbf{z}$, where $\mathbf{Q}$ is symmetric but non-Hermitian (i.e. having complex entries in its diagonal) and $\mathbf{z}$ is real valued vector.
End of explanation
"""
from modshogun import CGMShiftedFamilySolver
cgm = CGMShiftedFamilySolver()
# creating a random spd matrix
dim = 5
np.random.seed(10)
m = csc_matrix(np.random.randn(dim, dim))
a = m.transpose() * m + csc_matrix(np.identity(dim))
Q = RealSparseMatrixOperator(a)
# creating a random vector
v = np.array(np.random.randn(dim))
# number of shifts (will be equal to the number of contour points)
num_shifts = 3;
# generating some random shifts
shifts = []
for i in range(0, num_shifts):
shifts.append(complex(np.random.randn(), np.random.randn()))
sigma = np.array(shifts)
print 'Shifts:', sigma
# generating some random weights
weights = []
for i in range(0, num_shifts):
weights.append(complex(np.random.randn(), np.random.randn()))
alpha = np.array(weights)
print 'Weights:',alpha
# solve for the systems
cgm = CGMShiftedFamilySolver(True)
cgm.set_iteration_limit(20)
x = cgm.solve_shifted_weighted(Q, v, sigma, alpha)
print 'x:',x
residuals = cgm.get_residuals()
plt.plot(residuals)
plt.show()
# verifying the result with cocg
x_s = np.array([0+0j] * dim)
for i in range(0, num_shifts):
a_s = a.astype(complex)
for j in range(0, dim):
# moving the complex shift inside the operator
a_s[j,j] += sigma[i]
Q_s = ComplexSparseMatrixOperator(a_s)
# multiplying the result with weight
x_s += alpha[i] * cocg.solve(Q_s, v)
print 'x\':', x_s
"""
Explanation: <h4><a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CCGMShiftedFamilySolver.html">CG-M Shifted Family Solver</a></h4>
Solves systems with real-valued spd matrices and complex shifts. For using it with log-det, there is also an option to specify the weight of each solution. The solve_shifted_weighted method returns $\sum\alpha_{l}\mathbf{x}_{l}$ where $\mathbf{x}_{l}=(\mathbf{A}+\sigma_{l}\mathbf{I})^{-1}\mathbf{y}$, $\sigma,\alpha\in\mathbb{C}$, $\mathbf{y}\in\mathbb{R}$.
End of explanation
"""
from modshogun import DirectSparseLinearSolver
# creating a random spd matrix
dim = 5
np.random.seed(10)
m = csc_matrix(np.random.randn(dim, dim))
a = m.transpose() * m + csc_matrix(np.identity(dim))
Q = RealSparseMatrixOperator(a)
# creating a random vector
y = np.array(np.random.randn(dim))
# solve the system Qx=y
chol = DirectSparseLinearSolver()
x = chol.solve(Q,y)
print 'x:',x
# verifying the result
print 'y:', y
print 'Qx:', Q.apply(x)
"""
Explanation: Apart from iterative solvers, a few more triangular solvers are added.
<h4><a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CDirectSparseLinearSolver.html">Direct Sparse Linear Solver</a></h4>
This uses sparse Cholesky to solve for linear systems $\mathbf{Qx}=\mathbf{y}$, where $\mathbf{Q}$ is real-valued spd linear operator (e.g. dense/sparse matrix operator), and $\mathbf{y}$ is real vector.
End of explanation
"""
from modshogun import DirectLinearSolverComplex
# creating a random spd matrix
dim = 5
np.random.seed(10)
m = np.array(np.random.randn(dim, dim))
a = m.transpose() * m + csc_matrix(np.identity(dim))
a = a.astype(complex)
# adding a complex entry along the diagonal
for i in range(0, dim):
a[i,i] += complex(np.random.randn(), np.random.randn())
Q = ComplexDenseMatrixOperator(a)
z = np.array(np.random.randn(dim))
# solve for the system Qx=z
solver = DirectLinearSolverComplex()
x = solver.solve(Q, z)
print 'x:',x
# verifying the result
print 'z:',z
print 'Qx:',np.real(Q.apply(x))
"""
Explanation: <h4><a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CDirectLinearSolverComplex.html">Direct Linear Solver for Complex</a></h4>
This solves linear systems $\mathbf{Qx}=\mathbf{y}$, where $\mathbf{Q}$ is complex-valued dense matrix linear operator, and $\mathbf{y}$ is real vector.
End of explanation
"""
|
flowmatters/veneer-py | doc/examples/network/TopologicalQueries.ipynb | isc | import veneer
%matplotlib inline
v = veneer.Veneer()
"""
Explanation: Network queries
veneer-py supports a number of topological queries on the Source node-link network, including identifying outlets, upstream and downstream nodes, links and catchments.
These queries operate on the network object returned by v.network(). The topological queries are not available on the dataframe version (created with .as_dataframe()), although in some cases the results of the previous queries can be carried over to the dataframe.
End of explanation
"""
network = v.network()
"""
Explanation: Different forms of the network.
The node-link network that we get from Source includes topological information, in addition to the geometries of the various nodes, links and catchments, and their attributes, such as node names.
When we initially retrieve the network with v.network(), we get an object that includes a number of queries based on this topology.
Note: These queries are not implemented on the dataframe of the network, created with v.network().as_dataframe(). However you can call as_dataframe() on the result of some of the topological queries.
End of explanation
"""
outlets = network.outlet_nodes().as_dataframe()
outlets[:10]
"""
Explanation: eg, find all outlet nodes
End of explanation
"""
upstream_features = network.upstream_features('/network/nodes/214').as_dataframe()
upstream_features
upstream_features.plot()
"""
Explanation: Feature id
Other topological queries are based on the id attribute of features in the network. For example /network/nodes/187
End of explanation
"""
network.partition?
gauge_names = network['features'].find_by_icon('/resources/GaugeNodeModel')._select(['name'])
gauge_names
network.partition(gauge_names,'downstream_gauge')
dataframe = network.as_dataframe()
dataframe[:10]
## Path between two features
network.path_between?
network.path_between('/network/catchments/20797','/network/nodes/56').as_dataframe()
"""
Explanation: Partitioning the network
The network.partition method can be very useful for a range of parameterisation and reporting needs.
partition groups all features (nodes, links and catchments) in the network based on which of a series of key nodes those features drain through.
partition adds a new property to each feature, naming the relevant key node (or the outlet node if none of the key nodes are downstream of a particular feature).
Note: You can name the property used to identify the key nodes, which means you can run partition multiple times to identify different groupings within the network
End of explanation
"""
|
yafeunteun/wikipedia-spam-classifier | notebooks/.ipynb_checkpoints/feature_engineering-checkpoint.ipynb | mit | import sys
sys.path.append("/usr/local/lib/python3.4/dist-packages/")
sys.path.append("/usr/local/lib/python3.4/dist-packages/revscoring/")
sys.path.append("/usr/local/lib/python3.4/dist-packages/more_itertools/")
sys.path.append("/usr/local/lib/python3.4/dist-packages/deltas/")
!sudo pip3 install dependencies deltas
from revscoring.extractors import api
import mwapi
extractor = api.Extractor(mwapi.Session("https://en.wikipedia.org",
user_agent="Revscoring feature demo [email protected]"))
"""
Explanation: Feature engineering
This notebook will teach you how to extract feature values using revscoring's built-in feature library as well as to build your own features.
Set up the feature extractor
This line constructs a "feature extractor" that uses Wikipedia's API. We'll need to use it later, so we'll construct it first.
End of explanation
"""
from revscoring import Feature
from revscoring.features import wikitext  # provides the wikitext.* features used below
chars_added_ratio_explicit = Feature(
"chars_added_ratio_explicit",
lambda a,c: a/max(c, 1), # Prevents divide by zero
depends_on=[wikitext.revision.diff.chars_added,
wikitext.revision.chars],
returns=float)
list(extractor.extract(123456789, [chars_added_ratio_explicit]))
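# Added sketch (not in the original notebook): the simple two-feature extraction
# mentioned in the accompanying text, relying on the `wikitext` feature module
# imported above.
list(extractor.extract(123456789, [wikitext.revision.chars,
                                   wikitext.revision.diff.chars_added]))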
"""
Explanation: Extract features
The following line demonstrates a simple feature extraction. We'll extract two features: wikitext.revision.diff.chars_added, the number of characters added; and wikitext.revision.chars, the number of characters in the entire revision. Note that we wrap the call in a list() because it returns a generator.
Defining a custom feature
The next block defines a new feature and sets the dependencies to be the two features we just extracted. This feature represents the proportion of characters in the current version of the page that the current edit is responsible for adding.
End of explanation
"""
from revscoring.features import modifiers
chars_added_ratio_implicit = (wikitext.revision.diff.chars_added /
modifiers.max(wikitext.revision.chars, 1))
list(extractor.extract(123456789, [chars_added_ratio_implicit]))
"""
Explanation: There's easier ways that we can do this though. revscoring.Feature overloads simple mathematical operators to allow you to do math with features and get a feature returned. revscoring.features.modifiers contains a set of basic functions that do the same. This code roughly corresponds to what's going on above.
End of explanation
"""
chars_added_ratio_explicit, chars_added_ratio_implicit
"""
Explanation: While the implicit pattern is quicker and easier than the explicit pattern, its name cannot be customized.
End of explanation
"""
list(extractor.extract(662953550, [wikitext.revision.diff.datasources.segments_added,
wikitext.revision.diff.datasources.segments_removed]))
"""
Explanation: Extracting datasources
There's also a set of revscoring.Datasource objects that are part of the dependency injection system. These "datasources" represent the data needed for feature generation. We can extract them just like revscoring.Feature objects.
End of explanation
"""
import mwparserfromhell as mwp
templates_added = Feature("templates_added",
lambda add_segments: sum(len(mwp.parse(s).filter_templates()) > 0 for s in add_segments),
depends_on=[wikitext.revision.diff.datasources.segments_added],
returns=int)
list(extractor.extract(662953550, [templates_added]))
"""
Explanation: OK. Let's define a new feature for counting the number of templates added. I'll make use of mwparserfromhell to do this. See the docs.
End of explanation
"""
from revscoring.dependencies import draw
print(draw(templates_added))
"""
Explanation: Debugging
There's some facilities in place to help you make sense of issues when they arise. The most important is the draw function.
End of explanation
"""
print(draw(wikitext.revision.diff.number_prop_delta_sum))
"""
Explanation: In the tree structure above, you can see how our new feature depends on wikitext.revision.diff.segments_added which depends on wikitext.revision.diff.operations which depends (as you might imagine) on the current and parent revision. Some features can get quite complicated.
End of explanation
"""
try:
list(extractor.extract(2, [wikitext.revision.diff.words_added]))
except Exception as e:
print(e)
try:
list(extractor.extract(262721924, [wikitext.revision.diff.words_added]))
except Exception as e:
print(e)
from revscoring.features import revision_oriented
try:
list(extractor.extract(172665816, [revision_oriented.revision.comment_matches("foo")]))
except Exception as e:
print(e)
from revscoring.features import temporal
try:
list(extractor.extract(591839757, [revision_oriented.revision.user.text_matches("foo")]))
except Exception as e:
print(e)
"""
Explanation: The dependency injection system will only solve a unique dependency once for a given tree. So, even though <revision.parent.text> appears twice above, it will only be extracted once and then cached. This allows for multiple features to share large sections of their dependency trees -- and therefore minimize resource usage.
Errors during extraction
A revscoring.Extractor should be expected to throw an exception if it cannot find a resource it needs during extraction. These messages are intended to clearly convey what went wrong.
End of explanation
"""
|
fujii-team/GPinv | notebooks/Regression example.ipynb | apache-2.0 | import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
# Import GPinv
import GPinv
# Import GPflow as comparison
import GPflow
"""
Explanation: An example of GPinv for regression purposes
This notebook briefly shows regression usage with GPinv (GPflow is imported for comparison).
Keisuke Fujii 7th Oct. 2016
In this notebook, we show some examples of using GPinv for regression.
We show
Simple regression, which can also be done with a usual Gaussian process.
Regression with non-Gaussian likelihood.
More flexible regression with the expert model.
Import several libraries including GPinv
End of explanation
"""
X = np.linspace(0,6,40).reshape(-1,1)
Y = np.sin(X) + np.random.randn(40,1)*0.1
"""
Explanation: Simple regression
Synthetic data
End of explanation
"""
m_gpflow = GPflow.gpr.GPR(X, Y, GPflow.kernels.RBF(1))
m_gpflow.optimize()
X_new = np.linspace(-0.1,6.1,40).reshape(-1,1)
f_mu, f_var = m_gpflow.predict_f(X_new)
plt.plot(X, Y, 'go')
plt.fill_between(X_new.flatten(), (f_mu+2*np.sqrt(f_var)).flatten(), (f_mu-2*np.sqrt(f_var)).flatten(), alpha=0.5)
plt.plot(X_new, f_mu,'b')
"""
Explanation: GPflow.gpr.GPR
End of explanation
"""
class GaussianLikelihood(GPinv.likelihoods.Likelihood):
def __init__(self):
GPinv.likelihoods.Likelihood.__init__(self)
# variance parameter is assigned as GPinv.param.Param
self.variance = GPinv.param.Param(1, GPinv.transforms.positive)
def logp(self, F, Y):
return GPinv.densities.gaussian(Y, F, self.variance)
# In GPinv, kernel object is slightly different from that in GPflow.
# The GPinv kernel requires output_dim, which is the output dimension of the kernel.
# In this case, it is 1.
m_gpinv = GPinv.stvgp.StVGP(X, Y, GPinv.kernels.RBF(1, output_dim=1), likelihood=GaussianLikelihood())
# StVGP uses stochastic approximation of the evidence lower bound, we need stochastic optimizer by tensorflow
trainer = tf.train.AdamOptimizer(learning_rate=0.005)
m_gpinv.optimize(trainer, maxiter=2000)
# GPinv model also has predict_f method.
f_mu2, f_var2 = m_gpinv.predict_f(X_new)
"""
Explanation: GPinv.stvgp
In GPinv, we need a custom likelihood, in which at least the conditional likelihood of the data (logp) is specified.
The likelihood class should inherit from GPinv.likelihoods.Likelihood.
End of explanation
"""
plt.plot(X_new, f_mu+2*np.sqrt(f_var), '--b',
X_new, f_mu-2*np.sqrt(f_var), '--b')
plt.plot(X_new, f_mu, '-b', label='GPflow.gpr')
plt.plot(X_new, f_mu2+2*np.sqrt(f_var2), '--r',
X_new, f_mu2-2*np.sqrt(f_var2), '--r')
plt.plot(X_new, f_mu2, '-r', label='GPinv.stvgp')
plt.legend()
"""
Explanation: Compare results
End of explanation
"""
Y = np.sin(X) + np.random.randn(X.shape[0],X.shape[1]) * 0.1
Y[np.random.randint(0,X.shape[0],5),0] = np.random.randn(5) # Add non-Gaussian noise
"""
Explanation: These two models show almost identical results,
although GPinv.stvgp needs more calculation time.
Regression with non-Gaussian likelihood
GPflow.vgp
End of explanation
"""
m_gpflow = GPflow.vgp.VGP(X, Y, GPflow.kernels.RBF(1), likelihood=GPflow.likelihoods.StudentT())
_= m_gpflow.optimize()
X_new = np.linspace(-0.1,6.1,40).reshape(-1,1)
f_mu, f_var = m_gpflow.predict_f(X_new)
plt.plot(X, Y, 'go')
plt.fill_between(X_new.flatten(), (f_mu+2*np.sqrt(f_var)).flatten(), (f_mu-2*np.sqrt(f_var)).flatten(), alpha=0.5)
plt.plot(X_new, f_mu,'b')
"""
Explanation: GPflow.vgp.VGP
For a non-Gaussian likelihood, GPflow.vgp.VGP can be used.
We assume a Student's-t likelihood.
End of explanation
"""
class StudentLikelihood(GPinv.likelihoods.Likelihood):
def __init__(self):
GPinv.likelihoods.Likelihood.__init__(self)
# variance parameter is assigned as GPinv.param.Param
self.variance = GPinv.param.Param(1, GPinv.transforms.positive)
def logp(self, F, Y):
return GPinv.densities.student_t(Y, F, self.variance, deg_free=np.ones(1)*3)
# In GPinv, kernel object is slightly different from that in GPflow.
# The GPinv kernel requires output_dim, which is the output dimension of the kernel.
# In this case, it is 1.
m_gpinv = GPinv.stvgp.StVGP(X, Y, GPinv.kernels.RBF(1, output_dim=1), likelihood=StudentLikelihood())
# StVGP uses stochastic approximation of the evidence lower bound, we need stochastic optimizer by tensorflow
trainer = tf.train.AdamOptimizer(learning_rate=0.002)
m_gpinv.optimize(trainer, maxiter=2000)
# GPinv model also has predict_f method.
f_mu2, f_var2 = m_gpinv.predict_f(X_new)
"""
Explanation: GPinv.stvgp
In GPinv, we need a custom likelihood, in which at least the conditional likelihood of the data (logp) is specified.
The likelihood class should inherit from GPinv.likelihoods.Likelihood.
End of explanation
"""
plt.plot(X_new, f_mu+2*np.sqrt(f_var), '--b',
X_new, f_mu-2*np.sqrt(f_var), '--b')
plt.plot(X_new, f_mu, '-b', label='GPflow.gpr')
plt.plot(X_new, f_mu2+2*np.sqrt(f_var2), '--r',
X_new, f_mu2-2*np.sqrt(f_var2), '--r')
plt.plot(X_new, f_mu2, '-r', label='GPinv.stvgp')
plt.legend()
"""
Explanation: Compare results
End of explanation
"""
X = np.linspace(0,6,100).reshape(-1,1)
Y = np.sin(0.1*X*X*X) + np.random.randn(*X.shape)*0.1
plt.plot(X,Y,'o')
"""
Explanation: These two models show very similar results.
More flexible regression.
Here we demonstrate more flexible regression.
In this example, we try to fit data whose length scale varies with position.
End of explanation
"""
m_gpflow = GPflow.gpr.GPR(X, Y, GPflow.kernels.RBF(1))
m_gpflow.optimize()
X_new = np.linspace(-0.1,6.1,200).reshape(-1,1)
f_mu, f_var = m_gpflow.predict_f(X_new)
plt.plot(X, Y, 'go')
plt.fill_between(X_new.flatten(), (f_mu+2*np.sqrt(f_var)).flatten(), (f_mu-2*np.sqrt(f_var)).flatten(), alpha=0.5)
plt.plot(X_new, f_mu,'b')
"""
Explanation: GPflow.gpr
End of explanation
"""
class ExpertLikelihood(GPinv.likelihoods.Likelihood):
def __init__(self):
GPinv.likelihoods.Likelihood.__init__(self)
self.variance = GPinv.param.Param(1, GPinv.transforms.positive)
def sample_F(self, F):
"""
F has N x n x 3 shape,
where N is the number of samples, n is the number of data points, 3 is number or latent functions.
"""
N = tf.shape(F)[0]
n = tf.shape(F)[1]
# latent functions
fs, fl, r = tf.unpack(F, axis=-1, num=3) # divide into 3 functions with shape [N,n]
# calculate f from fs, fl, r
sig = tf.sigmoid(r)
return sig * fs + (1-sig) * fl
def logp(self, F, Y):
"""
F has N x n x 3 shape,
Y has n x 1 shape
"""
N = tf.shape(F)[0]
f = tf.expand_dims(self.sample_F(F), -1) # [N,n] -> [N,n,1]
# expand Y to match f shape
Y = tf.tile(tf.expand_dims(Y, 0), [N,1,1])
return GPinv.densities.gaussian(Y, f, self.variance)
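# Added sketch (not part of the original notebook): the sigmoid mixing computed by
# sample_F above, written out in plain NumPy for a few hypothetical latent values.
def mix(fs, fl, r):
    sig = 1.0 / (1.0 + np.exp(-r))  # sigmoid(r)
    return sig * fs + (1.0 - sig) * fl
print(mix(0.8, -0.3, 4.0))   # large r -> close to fs
print(mix(0.8, -0.3, -4.0))  # small (negative) r -> close to fl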
"""
Explanation: GPinv.stvgp
In GPinv, we can use a more flexible model.
Here, we demonstrate the expert function model with GPinv.
We assume three latent functions, one of which has a shorter lengthscale ($f_s(x)$),
another with a longer lengthscale ($f_l(x)$).
The last one, $r(x)$, has the largest lengthscale and represents the fraction of $f_s(x)$ and $f_l(x)$ contributed at each position,
i.e.
$$
f(x) = \frac{1}{1+e^{-r(x)}} f_s(x) + \frac{1}{1+e^{r(x)}} f_l(x)
$$
Likelihood
Such complex relations between multiple latent functions can be implemented in the Likelihood class.
End of explanation
"""
kernel = GPinv.kernels.Stack([GPinv.kernels.RBF(1,1, lengthscales=0.2), # initialize to shorter value
GPinv.kernels.RBF(1,1, lengthscales=1.0), # initialize to longer value
GPinv.kernels.RBF(1,1, lengthscales=2.0)])
"""
Explanation: Here, the sample_F function is useful for sampling from the posterior in the prediction phase.
Kernel
We use 3 kinds of kernels, one for each of the 3 latent functions.
This can be done with the Stack kernel, which stacks multiple kernels vertically.
End of explanation
"""
m_gpinv = GPinv.stvgp.StVGP(X, Y, kernel, ExpertLikelihood(),
                            num_latent = 3, # the number of latent functions should be explicitly specified.
num_samples=10)
trainer = tf.train.AdamOptimizer(learning_rate=0.002)
# This function visualizes the iteration.
from IPython import display
logf = []
def logger(x):
if (logger.i % 10) == 0:
obj = -m_gpinv._objective(x)[0]
logf.append(obj)
# display
if (logger.i % 100) ==0:
plt.clf()
plt.plot(logf, '-ko', markersize=3, linewidth=1)
plt.ylabel('ELBO')
plt.xlabel('iteration')
display.display(plt.gcf())
display.clear_output(wait=True)
logger.i+=1
logger.i = 1
import time
# start time
start_time = time.time()
plt.figure(figsize=(6,3))
# optimization by tf.train
_= m_gpinv.optimize(trainer, maxiter=5000, callback=logger)
display.clear_output(wait=True)
print('Elapsed Time is', time.time()-start_time, ' (s)')
X_new = np.linspace(-0.1,6.1,200).reshape(-1,1)
f_mu2, f_var2= m_gpinv.predict_f(X_new)
plt.figure(figsize=(12,3))
plt.subplot(1,3,1)
plt.title('$f_s$')
plt.fill_between(X_new.flatten(), f_mu2[:,0]+2.0*np.sqrt(f_var2[:,0]), f_mu2[:,0]-2.0*np.sqrt(f_var2[:,0]), alpha=0.5)
plt.plot(X_new.flatten(), f_mu2[:,0])
plt.subplot(1,3,2)
plt.title('$f_l$')
plt.fill_between(X_new.flatten(), f_mu2[:,1]+2.0*np.sqrt(f_var2[:,1]), f_mu2[:,1]-2.0*np.sqrt(f_var2[:,1]), alpha=0.5)
plt.plot(X_new.flatten(), f_mu2[:,1])
plt.subplot(1,3,3)
plt.title('$r$')
plt.fill_between(X_new.flatten(), f_mu2[:,2]+2.0*np.sqrt(f_var2[:,2]), f_mu2[:,2]-2.0*np.sqrt(f_var2[:,2]), alpha=0.5)
plt.plot(X_new.flatten(), f_mu2[:,2])
"""
Explanation: Model
End of explanation
"""
# Here we draw 101 samples
sample_F = m_gpinv.sample_F(101)
# plot 5-95 % range
plt.fill_between(X.flatten(),
np.percentile(sample_F, 5, axis=0),
np.percentile(sample_F, 95, axis=0), alpha=0.5)
plt.plot(X.flatten(), np.median(sample_F, axis=0))
plt.plot(X, Y, 'o')
"""
Explanation: Sample from the variational posterior
Samples from the posterior can be drawn with the sample_F method.
End of explanation
"""
plt.figure(figsize=(10,4))
plt.plot(X_new, f_mu+2*np.sqrt(f_var), '--b',
X_new, f_mu-2*np.sqrt(f_var), '--b')
plt.plot(X_new, f_mu, '-b', lw=2,label='GPflow.gpr')
plt.plot(X, np.percentile(sample_F, 5, axis=0), '--r',
X, np.percentile(sample_F,95, axis=0), '--r')
plt.plot(X, np.median(sample_F, axis=0), '-r', lw=2,label='GPinv.stvgp')
plt.legend(loc='best')
"""
Explanation: Comparison between GPflow.gpr and GPinv.stvgp
End of explanation
"""
|
Kaggle/learntools | notebooks/nlp/raw/ex1.ipynb | apache-2.0 | import pandas as pd
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.nlp.ex1 import *
print('Setup Complete')
"""
Explanation: Basic Text Processing with Spacy
You're a consultant for DelFalco's Italian Restaurant.
The owner asked you to identify whether there are any foods on their menu that diners find disappointing.
<img src="https://i.imgur.com/8DZunAQ.jpg" alt="Meatball Sub" width="250"/>
Before getting started, run the following cell to set up code checking.
End of explanation
"""
# Load in the data from JSON file
data = pd.read_json('../input/nlp-course/restaurant.json')
data.head()
"""
Explanation: The business owner suggested you use diner reviews from the Yelp website to determine which dishes people liked and disliked. You pulled the data from Yelp. Before you get to analysis, run the code cell below for a quick look at the data you have to work with.
End of explanation
"""
menu = ["Cheese Steak", "Cheesesteak", "Steak and Cheese", "Italian Combo", "Tiramisu", "Cannoli",
"Chicken Salad", "Chicken Spinach Salad", "Meatball", "Pizza", "Pizzas", "Spaghetti",
"Bruchetta", "Eggplant", "Italian Beef", "Purista", "Pasta", "Calzones", "Calzone",
"Italian Sausage", "Chicken Cutlet", "Chicken Parm", "Chicken Parmesan", "Gnocchi",
"Chicken Pesto", "Turkey Sandwich", "Turkey Breast", "Ziti", "Portobello", "Reuben",
"Mozzarella Caprese", "Corned Beef", "Garlic Bread", "Pastrami", "Roast Beef",
"Tuna Salad", "Lasagna", "Artichoke Salad", "Fettuccini Alfredo", "Chicken Parmigiana",
"Grilled Veggie", "Grilled Veggies", "Grilled Vegetable", "Mac and Cheese", "Macaroni",
"Prosciutto", "Salami"]
"""
Explanation: The owner also gave you this list of menu items and common alternate spellings.
End of explanation
"""
# Check your answer (Run this code cell to receive credit!)
q_1.solution()
"""
Explanation: Step 1: Plan Your Analysis
Given the data from Yelp and the list of menu items, do you have any ideas for how you could find which menu items have disappointed diners?
Think about your answer. Then run the cell below to see one approach.
End of explanation
"""
import spacy
from spacy.matcher import PhraseMatcher
index_of_review_to_test_on = 14
text_to_test_on = data.text.iloc[index_of_review_to_test_on]
# Load the SpaCy model
nlp = spacy.blank('en')
# Create the tokenized version of text_to_test_on
review_doc = ____
# Create the PhraseMatcher object. The tokenizer is the first argument. Use attr = 'LOWER' to make consistent capitalization
matcher = PhraseMatcher(nlp.vocab, attr='LOWER')
# Create a list of tokens for each item in the menu
menu_tokens_list = [____ for item in menu]
# Add the item patterns to the matcher.
# Look at https://spacy.io/api/phrasematcher#add in the docs for help with this step
# Then uncomment the lines below
#
#matcher.add("MENU", # Just a name for the set of rules we're matching to
# ____
# )
# Find matches in the review_doc
# matches = ____
# Uncomment to check your work
#q_2.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q_2.hint()
#_COMMENT_IF(PROD)_
q_2.solution()
"""
Explanation: Step 2: Find items in one review
You'll pursue this plan of calculating average scores of the reviews mentioning each menu item.
As a first step, you'll write code to extract the foods mentioned in a single review.
Since menu items are multiple tokens long, you'll use PhraseMatcher which can match series of tokens.
Fill in the ____ values below to get a list of items matching a single menu item.
End of explanation
"""
# for match in matches:
# print(f"Token number {match[1]}: {review_doc[match[1]:match[2]]}")
#%%RM_IF(PROD)%%
import spacy
from spacy.matcher import PhraseMatcher
index_of_review_to_test_on = 14
text_to_test_on = data.text.iloc[index_of_review_to_test_on]
nlp = spacy.blank('en')
review_doc = nlp(text_to_test_on)
matcher = PhraseMatcher(nlp.vocab, attr='LOWER')
menu_tokens_list = [nlp(item) for item in menu]
matcher.add("MENU", menu_tokens_list)
matches = matcher(review_doc)
# Uncomment when checking code is complete
q_2.assert_check_passed()
"""
Explanation: After implementing the above cell, uncomment the following cell to print the matches.
End of explanation
"""
from collections import defaultdict
# item_ratings is a dictionary of lists. If a key doesn't exist in item_ratings,
# the key is added with an empty list as the value.
item_ratings = defaultdict(list)
for idx, review in data.iterrows():
doc = ____
# Using the matcher from the previous exercise
matches = ____
# Create a set of the items found in the review text
found_items = ____
# Update item_ratings with rating for each item in found_items
# Transform the item strings to lowercase to make it case insensitive
____
q_3.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q_3.hint()
#_COMMENT_IF(PROD)_
q_3.solution()
#%%RM_IF(PROD)%%
from collections import defaultdict
item_ratings = defaultdict(list)
for idx, review in data.iterrows():
doc = nlp(review.text)
matches = matcher(doc)
found_items = set([doc[match[1]:match[2]].text.lower() for match in matches])
for item in found_items:
item_ratings[item].append(review.stars)
q_3.assert_check_passed()
"""
Explanation: Step 3: Matching on the whole dataset
Now run this matcher over the whole dataset and collect ratings for each menu item. Each review has a rating, review.stars. For each item that appears in the review text (review.text), append the review's rating to a list of ratings for that item. The lists are kept in a dictionary item_ratings.
To get the matched phrases, you can reference the PhraseMatcher documentation for the structure of each match object:
A list of (match_id, start, end) tuples, describing the matches. A match tuple describes a span doc[start:end]. The match_id is the ID of the added match pattern.
End of explanation
"""
# Calculate the mean ratings for each menu item as a dictionary
mean_ratings = ____
# Find the worst item, and write it as a string in worst_item. This can be multiple lines of code if you want.
worst_item = ____
q_4.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q_4.hint()
#_COMMENT_IF(PROD)_
q_4.solution()
# After implementing the above cell, uncomment and run this to print
# out the worst item, along with its average rating.
#print(worst_item)
#print(mean_ratings[worst_item])
#%%RM_IF(PROD)%%
mean_ratings = {item: sum(ratings)/len(ratings) for item, ratings in item_ratings.items()}
worst_item = sorted(mean_ratings, key=mean_ratings.get)[0]
q_4.assert_check_passed()
"""
Explanation: Step 4: What's the worst reviewed item?
Using these item ratings, find the menu item with the worst average rating.
End of explanation
"""
counts = {item: len(ratings) for item, ratings in item_ratings.items()}
item_counts = sorted(counts, key=counts.get, reverse=True)
for item in item_counts:
print(f"{item:>25}{counts[item]:>5}")
"""
Explanation: Step 5: Are counts important here?
Similar to the mean ratings, you can calculate the number of reviews for each item.
End of explanation
"""
sorted_ratings = sorted(mean_ratings, key=mean_ratings.get)
print("Worst rated menu items:")
for item in sorted_ratings[:10]:
print(f"{item:20} Ave rating: {mean_ratings[item]:.2f} \tcount: {counts[item]}")
print("\n\nBest rated menu items:")
for item in sorted_ratings[-10:]:
print(f"{item:20} Ave rating: {mean_ratings[item]:.2f} \tcount: {counts[item]}")
"""
Explanation: Here is code to print the 10 best and 10 worst rated items. Look at the results, and decide whether you think it's important to consider the number of reviews when interpreting scores of which items are best and worst.
End of explanation
"""
# Check your answer (Run this code cell to receive credit!)
q_5.solution()
"""
Explanation: Run the following line after you've decided your answer.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.19/_downloads/ff83425ee773d1d588a6994e5560c06c/plot_mne_dspm_source_localization.ipynb | bsd-3-clause | import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
"""
Explanation: Source localization with MNE/dSPM/sLORETA/eLORETA
The aim of this tutorial is to teach you how to compute and apply a linear
inverse method such as MNE/dSPM/sLORETA/eLORETA on evoked/raw/epochs data.
End of explanation
"""
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname) # already has an average reference
events = mne.find_events(raw, stim_channel='STI 014')
event_id = dict(aud_l=1) # event trigger and conditions
tmin = -0.2 # start of each epoch (200ms before the trigger)
tmax = 0.5 # end of each epoch (500ms after the trigger)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
baseline = (None, 0) # means from the first instant to t = 0
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=('meg', 'eog'), baseline=baseline, reject=reject)
"""
Explanation: Process MEG data
End of explanation
"""
noise_cov = mne.compute_covariance(
epochs, tmax=0., method=['shrunk', 'empirical'], rank=None, verbose=True)
fig_cov, fig_spectra = mne.viz.plot_cov(noise_cov, raw.info)
"""
Explanation: Compute regularized noise covariance
For more details see tut_compute_covariance.
End of explanation
"""
evoked = epochs.average().pick('meg')
evoked.plot(time_unit='s')
evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag',
time_unit='s')
# Show whitening
evoked.plot_white(noise_cov, time_unit='s')
del epochs # to save memory
"""
Explanation: Compute the evoked response
Let's just use MEG channels for simplicity.
End of explanation
"""
# Read the forward solution and compute the inverse operator
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fwd = mne.read_forward_solution(fname_fwd)
# make an MEG inverse operator
info = evoked.info
inverse_operator = make_inverse_operator(info, fwd, noise_cov,
loose=0.2, depth=0.8)
del fwd
# You can write it to disk with::
#
# >>> from mne.minimum_norm import write_inverse_operator
# >>> write_inverse_operator('sample_audvis-meg-oct-6-inv.fif',
# inverse_operator)
"""
Explanation: Inverse modeling: MNE/dSPM on evoked and raw data
End of explanation
"""
method = "dSPM"
snr = 3.
lambda2 = 1. / snr ** 2
stc, residual = apply_inverse(evoked, inverse_operator, lambda2,
method=method, pick_ori=None,
return_residual=True, verbose=True)
"""
Explanation: Compute inverse solution
End of explanation
"""
plt.figure()
plt.plot(1e3 * stc.times, stc.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()
"""
Explanation: Visualization
View activation time-series
End of explanation
"""
fig, axes = plt.subplots(2, 1)
evoked.plot(axes=axes)
for ax in axes:
ax.texts = []
for line in ax.lines:
line.set_color('#98df81')
residual.plot(axes=axes)
"""
Explanation: Examine the original data and the residual after fitting:
End of explanation
"""
vertno_max, time_max = stc.get_peak(hemi='rh')
subjects_dir = data_path + '/subjects'
surfer_kwargs = dict(
hemi='rh', subjects_dir=subjects_dir,
clim=dict(kind='value', lims=[8, 12, 15]), views='lateral',
initial_time=time_max, time_unit='s', size=(800, 800), smoothing_steps=5)
brain = stc.plot(**surfer_kwargs)
brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
scale_factor=0.6, alpha=0.5)
brain.add_text(0.1, 0.9, 'dSPM (plus location of maximal activation)', 'title',
font_size=14)
"""
Explanation: Here we use the peak getter to move the visualization to the time point of the peak
and draw a marker at the maximum peak vertex.
End of explanation
"""
# setup source morph
morph = mne.compute_source_morph(
src=inverse_operator['src'], subject_from=stc.subject,
subject_to='fsaverage', spacing=5, # to ico-5
subjects_dir=subjects_dir)
# morph data
stc_fsaverage = morph.apply(stc)
brain = stc_fsaverage.plot(**surfer_kwargs)
brain.add_text(0.1, 0.9, 'Morphed to fsaverage', 'title', font_size=20)
del stc_fsaverage
"""
Explanation: Morph data to average brain
End of explanation
"""
stc_vec = apply_inverse(evoked, inverse_operator, lambda2,
method=method, pick_ori='vector')
brain = stc_vec.plot(**surfer_kwargs)
brain.add_text(0.1, 0.9, 'Vector solution', 'title', font_size=20)
del stc_vec
"""
Explanation: Dipole orientations
The pick_ori parameter of the
:func:mne.minimum_norm.apply_inverse function controls
the orientation of the dipoles. One useful setting is pick_ori='vector',
which will return an estimate that contains not only the source power at
each dipole, but also the orientation of the dipoles.
End of explanation
"""
for mi, (method, lims) in enumerate((('dSPM', [8, 12, 15]),
('sLORETA', [3, 5, 7]),
('eLORETA', [0.75, 1.25, 1.75]),)):
surfer_kwargs['clim']['lims'] = lims
stc = apply_inverse(evoked, inverse_operator, lambda2,
method=method, pick_ori=None)
brain = stc.plot(figure=mi, **surfer_kwargs)
brain.add_text(0.1, 0.9, method, 'title', font_size=20)
del stc
"""
Explanation: Note that there is a relationship between the orientation of the dipoles and
the surface of the cortex. For this reason, we do not use an inflated
cortical surface for visualization, but the original surface used to define
the source space.
For more information about dipole orientations, see
tut-dipole-orientations.
Now let's look at each solver:
End of explanation
"""
|
juherask/YoungCodersIPythonNotebookFI | YoungCodersNotebook.ipynb | mit | 1+2
"""
Explanation: OPI PYTHONIN PERUSTEET
Tämä interaktiivinen harjoituskokoelma opettaa sinulle Python-ohjelmointikielen alkeet. Opas seurailee PyCon 2014 -tapahtumassa Barbara Shauretten ja Katie Cunninghamin pitämän Python-koulutuksen rakennetta.
Jos haluat tämän suomenkielisen oppaan sijaan tutustua englanninkieliseen alkuperäismateriaaliin (kalvot, esimerkit ym.), ne ovat ladattavissa täältä:
https://github.com/mechanicalgirl/young-coders-tutorial
Mitä ohjelmoinnilla tarkoitetaan:
* Tietokone on laite joka paitsi säilöö, myös siirtää, järjestää, luo ja hallitsee tietoa (eli dataa).
* Tietokoneohjelma puolestaan on joukko yksikäsitteisiä ohjeita, jotka kertovat tietokoneelle mitä datalle pitäisi tehdä. Ohjelmointikieli on se erityinen kieli, millä nämä ohjeet kirjoitetaan.
* Alla on joitain esimerkkejä ohjeista, jotka on tarkoitettu meille ihmisille (origamilinnun taitteluohje, pikkuleipäresepti ja kirjan sidontaohje):
Heti alkuun on hyvä sisäistää, että tietokone on kovin tyhmä. Se ymmärrä yllä olevan esimerkin kaltaisia oheita, vaikka ne ovat ihmiselle päivänselviä. Tietokoneelle ohjeet pitää kirjoittaa todella yksityiskohtaisesti, huolellisesti ja tarkasti, sillä se toimii täsmälleen kuten ohjeistettu. Tämä ohjeiden kirjoittaminen jollain ohjelmointikielellä on ohjelmointia - eli juuri sitä, mitä tämä opas opettaa.
Algoritmilla tarkoitetaan ohjeita jonkin tietyn asian tekemiseksi tietokoneella - usein tehokkaalla ja nopealla tavalla. Algoritmit ovat kuin reseptejä, eli askel-askeleelta eteneviä ohjeita, joiden lopputulos on haluttu (esim. kanelipulla tai aakkosjärjestykseen saatettu lista nimiä).
Pian kokeilemme kirjoittaa ohjeita tietokoneelle, mutta ensin on hyvä tietää, että ohjelmointikieliä on olemassa valtavasti erilaisia - eri käyttötarkoituksiin. Tässä oppaassa opettelemme ohjelmointikieltä nimeltä Python, joka on aloittelijaystävällinen ja silti valtavan tehokas ja ilmaisuvoimainen kieli.
Python voi näyttäytyä ohjelmoijalle monessa muodossa:
1. Yksinkertaisin tapa käyttää Pythonia on Python-komentotulkki, jonne kirjoitetaan komentoja, jotka tulkki sitten välittämästi suorittaa. Komentotulkin kanssa vuoropuheluna voi tehdä laskutoimituksia, opiskella Pythonin käyttöä tai kokeilla erilaisten ideoiden toimivuutta.
1. Komentotulkista on myös kehittyneempi ja käyttäjäystävällisempi versio nimeltä IPython. Se tarjoaa monenlaisia apuvälineitä Pythonin kirjoittamiseen: tiedostojen hallintaa, dokumentaatiota, erilaisia oikoteitä, komentohistorian ja paljon muuta.
1. IPython Notebookin, jota nyt parhaillaan käytät, lähestymistapa on erilainen: käytössäsi on nettiselaimessa (Chrome, Firefox ym.) toimiva vuorovaikutteinen muistikirjan sivu, jonne voi kirjoittaa paitsi tekstiä, myös Python-ohjelmointikielisiä käskyjä.
1. Python-ohjelmointikielellä voidaan myös tehdä itsenäisesti toimivia ohjelmia. Tällöin kirjoitetaan .py-tiedosto, joka annetaan tietokoneen suoritettavaksi. .py-tiedostojen kirjoittamiseen on monenlaisia työkaluja aina muistiosta tehokkaisiin integroituihin kehitysympäristöihin (eng. IDE). Näin tehty ohjelma voidaan jopa paketoida suoritettavaksi .exe-tiedostoksi.
1. Laskutoimituksia
Aloitetaan Python-ohjelmointi yksinkertaisella matematiikalla. Pythonia voi nimittäin käyttää kätevänä laskimena.
Naksauta hiirellä alla oleva harmaa laatikko aktiivisiksi ja paina [⇧SHIFT] + [↵ENTER], jolloin Python suorittaa laskutoimituksen, kertoo tuloksen ja siirtyy seuraavaan laatikkoon.
End of explanation
"""
12-3
"""
Explanation: Hyvä! Tee sama uudelleen alla olevalle laatikolle.
End of explanation
"""
9+2
"""
Explanation: Kokeile nyt kirjoittaa itse alla olevaan harmaaseen laatikkoon seuraava laskutoimitus.
Älä kuitenkaan kopioi >>>-merkkejä, sillä ne ovat vain ilmaisemassa, että kyseessä on Python-koodi:
```python
9+5-15
```
End of explanation
"""
2**2
2/3.0
"""
Explanation: Paina sitten [CTRL] + [↵ENTER] ja Python laskee laskun, kertoo tuloksen ja pitää kyseisen laatikon aktiivisena siltä varalta, että haluat muokata sen sisältöä. Tämä siis erotuksena aiempaan [⇧SHIFT] + [↵ENTER] -komentoon, joka siirtyy heti eteenpäin. Toiminnolle löytyy myös [▶]-nappi yläreunan työkalupalkista.
Matemaattiset operaattorit
Pythonin matemaattiset perusoperaattorit ovat:
* yhteenlasku: +
* vähennyslasku: -
* kertolasku: *
* jakolasku: /
Kokeile vaikka näitä:
```python
65
1053
6/2
20/7
```
Tai voit kokeilla laskea ihan itse keksimilläsi luvuilla. Kokeile myös operaattoria `**`. Keksitkö mitä se tekee?
End of explanation
"""
"hello world"
omppu
"""
Explanation: Sivuhuomautus: Pythonin 2.7 ja 3 versiot laskevat jakolaskun eri tavalla. Vanhempi ja käytöstä poistuva 2.7 saattaa tuottaa ikävän yllätyksen: 5/2 palauttaakin arvon 2, eikä 2.5. Jos Python 2.7:lla haluaa tarkan jakolaskun tuloksen on kirjoitettava 5.0/2.0. Tässä pitää olla tarkkana, sillä Python 2.7 -ympäristöihin saattaa törmätä vielä ajoittain.
Toinen sivuhuomautus: Saatat huomata tehdessäsi omia laskuja, että tulos ei ole aivan sitä mitä odotit. Tällöin järjestys, missä Python suorittaa laskuoperaatot saattaa olla eri mitä tarkoitit. Voit pakottaa Pythonin laskemaan haluamassasi järjestyksessä lisäämällä sulkuja. Esimerkiksi 3+4+5/3 ei tuota samaa vastausta kuin (3+4+5)/3.
Vertailuoperaattorit
<table>
<tr><td>
`== `</td><td>tutkii ovatko luvut yhtäsuuret
</td></tr>
<tr><td>
`!= `</td><td>tutkii ovatko luvut erisuuret
</td></tr>
<tr><td>
`< `</td><td>tarkistaa onko vasemmalla puolella oleva luku pienempi kuin oikealla puolella oleva
</td></tr>
<tr><td>
`> `</td><td>tarkistaa onko vasemmalla puolella oleva luku suurempi kuin oikealla puolella oleva
</td></tr>
<tr><td>
`<= `</td><td>tarkistaa onko vasemmalla puolella oleva luku pienempi *tai yhtäsuuri* kuin oikealla puolella oleva
</td></tr>
<tr><td>
`>= `</td><td>tarkistaa onko vasemmalla puolella oleva luku suurempi *tai yhtäsuuri* kuin oikealla puolella oleva
</td></tr>
</table>
Kokeile näitä alla. Esimerkiksi:
```python
5 < 4 + 3
12 + 1 >= 12
16 * 2 == 32
16 != 16
5 >= 6
```
Vertailuoperaattorit ovat tärkeitä ohjelmoinnissa, kuten pian tulemme näkemään. Niiden avulla voimme ohjelmoida tietokoneen tekemään eri asioita eri tilanteissa.
2. Merkkijonot
Paitsi lukuja, käsittelevät tietokoneet myös paljon tekstimuotoista tietoa. Tekstimuotoinen tieto on tallennettu tietokoneelle jonona kirjoitusmerkkejä, eli tuttavallisemmin merkkijonona.
Python tulkitsee merkkijonoksi tekstinpätkän, joka on lainausmerkkien välissä. Kokeile:
```python
"hello world"
"omppu"
omppu
```
End of explanation
"""
color = 'yellow'
color = 12
"""
Explanation: Viimeisen esimerkin, eli sen ompun, jota ei oltu ympyröity lainausmerkein, tuleekin antaa virhe. Virheisiin palaamme pian, mutta tässä esimerkin tarkoitus on iskostaa mieleesi seuraava sääntö: jos haluat syöttää Pythonille merkkijonon, käytä lainausmerkkejä
Operaatioita merkkijonoille
Kuten luvuille, myös merkkijonoille voi tehdä erilaisia operaatioita:
yhdistäminen: +
toistaminen: *
Katso ja kokeile mitä Python tekee kun annat sille seuraavat ohjeet:
```python
"Hei " + "muukalainen"
"HAHA" * 100
```
3. Muuttujat
Tutustuimme edellä siihen, miten lukuja ja merkkijonoja käsitellään Pythonilla. Seuraavaksi tutustumme ideaan, jota voi käyttää molempien kanssa - eli muuttujaan.
Matematiikan tunnilta saatat muistaa x:n. Sitä käytettiin merkitsemään jotain tuntematonta (tai tunnettua) lukua. Ohjelmointikielessä nämä muuttujat toimivat hieman samalla tavalla.
Muuttujat ovat kuin laatikoita, joiden sisään voit pakata numeroita, merkkijonoja tai muita "juttuja".
Laske alla olevassa harmaassa laatikossa lasku:
```python
12 * 12
```
Python kuitenkin unohtaa tuloksen heti. Entäs jos haluaisit, että vastaus pysyy muistissa? Voit laittaa tuloksen muistiin sijoittamalla sen nimettyyn muuttujaan:
```python
tulos = 12 * 12
tulos
```
Muuttujat säästävät aikaa ja vaivaa, sillä voit nyt käyttää muuttujaa tulos uudelleen ja uudellen tulevissa laskutoimituksissa ja ohjeissa.
Huomaa, että tulos ei ole lainausmerkeissä. Näin siksi, että se ei ole merkkijono, vaan muuttuja. Yleensä kannattaa käyttää kuvaavia muuttujan nimiä. Näin ei tarvitse turhaan arvailla ja selvitellä, että mitä muuttujassa on sisällä - nimi kertoo sen jo.
Voit uudelleenkäyttää muuttujia. Sijoita vaan sinne uusi arvo. Esimerkiksi:
End of explanation
"""
color
"""
Explanation: Mitä luulet 'color' muuttujassa nyt olevan? Kokeillaan (suorita kaikki tämän esimerkin kolme laatikkoa ylhäältä alas):
End of explanation
"""
x = 3
y = 3
x==y
"""
Explanation: Muuttujat ovat ohjelmoinnissä hyvin tärkeitä. Siksi kertauksena:
* Voit laittaa tulokset talteen muuttujaan ja käyttää näitä tuloksia myöhemmin
* Voit vaihtaa muuttujan arvon vaikka sen nimi pysyy samana
* Muuttujaan sijoitetaan '=' operaattorilla. Tämä erotuksena '==' operaattoriin, joka siis tutkii ovatko luvut tai muuttujien sisältämät luvut samat (esimerkki alla). Tässä sekoilu on hyvin yleinen virhe.
End of explanation
"""
hedelma = "appelsiini"
hedelma[2]
indeksi = 4
hedelma[indeksi-2]
"""
Explanation: Tässä muuten joitain muita juttuja, mitä muuttujilla - ja erityisesti merkkijonomuuttujilla - voi tehdä.
End of explanation
"""
"kaveri"*5
"kaveri"+5
"""
Explanation: Alle voit kokeilla tätä merkkijonoihin indeksointia itse:
4. Virheet
Eräs ohjelmoinnissa hyvin tärkeä asia ovat virheet ja virheilmoitukset. Paraskaan ohjelmoija ei osaa kirjoittaa virheetöntä ohjelmakoodia. Siksi onkin hyvä, että työkalu osaa tunnistaa ja kertoa meille suurimmassa osassa tapauksista milloin olemme tehneet jotain väärin - ja mikä vielä tärkeämpää, missä ja mikä virhe oikein oli. Suorita alla olevat koodit.
End of explanation
"""
tervehdys = "Hei!"
type(tervehdys)
"""
Explanation: Mitä ylle ilmaantunut virhe mielestäsi tarkoittaa? Mitä 'str' ja 'int' tarkoittavat?
Pureskellaan virheilmoitus auki:
* Merkkijonot ('str' lyh. string)
* Kokonaisluvut ('int' lyh. integer)
* Molemmat ovat jotain objekteja (objects suom. olioita)
* Mutta Python ei osaa yhdistää niitä, sillä ne ovat eri tyyppisiä (TypeError).
Usein lukemalla virheilmoituksen huolellisesti voi päästä jyvälle siitä mikä menee vikaan. Esimerkkitapauksessamme virhe olisi korjattavissa niin, että annamme myös numeron lainausmerkkien välissä olevana merkkijonona, jolloin yhdistettävät ovat molemmat merkkijonoja. Kokeile:
```python
"kaveri"+"5"
```
Perustietotyypit
Olemme jo tutustuneet kolmeen tietotyyppiin:
* "Hei!", joka on merkkijono (eng. string)
* 27, joka on kokonaisluku (eng. integer)
* 3.14, joka on desimaaliluku/liukuluku (eng. float)
Sivuhuomautus: Huomaa, että Pythonin desimaalierotin on piste eikä pilkku. Tämä on hyvin yleistä ohjelmointikielissä ja tietojenkäsittelyssä, mutta ensikertalaisen kannattaa kiinnittää asiaan huomiota.
Python osaa kertoa meille tyypin Pythoniin sisäänrakennetulla type(...)-funktiolla. Funktioihin tutustumme lähemmin tuonnempana.
End of explanation
"""
type(12)
"""
Explanation: Hae alla vastaavalla tavalla tyyppikuvaus kokonaisluvulle ja liukuluvulle:
End of explanation
"""
float(5)
"""
Explanation: Voit muuttaa tietoa toiseen muotoon int(...), str(...) ja float(...) -funktioilla. Esimerkiksi:
End of explanation
"""
hedelmat = ["omena", "banaani", "mandariini"]
numerot = [3, 17, -4, 8.8, 1]
"""
Explanation: Kokeile muuttaa kokonaisluku merkkijonoksi:
Entäs muuttuuko numeron sisältävä merkkijono desimaaliluvuksi?:
5. Listat
Lista voi pitää sisällään sarjan olioita. Esimerkiksi:
End of explanation
"""
type(hedelmat), type(numerot)
"""
Explanation: Arvaa mitä type(...)-fuktio osaa kertoa:
End of explanation
"""
hedelmat[0]
numerot[1]
"""
Explanation: Voit käyttää kokonaislukuindeksiä ja hakasulkuoperaattoria hakeaksesi jonkin listan alkion:
End of explanation
"""
hedelmat[-1]
"""
Explanation: Huomaa edellisistä esimerkistä pari seikkaa. Ohjelmointikielissä on tyypillistä, että listan ensimmäinen alkio haetaan indeksillä 0 - se on siis tavallaan "nollas" alkio eikä "ensimmäinen". Näin myös Pythonissa.
Mihin negatiivinen indeksi -1 osoittaa? Entä -2? Kokeile arvasitko oikein:
End of explanation
"""
hedelmat[1][3]
"""
Explanation: Tehtävä:
Tee lista kolmesta lempiväristäsi:
Käytä indeksiä ja hakasulkuoperaattoria hakeaksesi yhden lempiväreistäsi:
Extratehtävä: mitä tapahtuu jos indeksoit kahdesti hedelmat-listaa tai lempivärilistaa? Miksi tulos on se mikä se on?
End of explanation
"""
1==1
15 < 5
"""
Explanation: 6. Totuusarvot (eng. booleans)
Totuusarvo voi olla True tai False
End of explanation
"""
1==1 and 2==2 # molemmat ovat tosia, lopputulos tosi
1==1 and 2!=2 # Vain toinen on tosi, lopputulos epätosi
1==2 and 2==3 # Kummatkaan eivät ole tosia, lopputulos epätosi
1==1 or 2==2 # molemmat ovat tosia, lopputulos tosi
1==1 or 2!=2 # Vain toinen on tosi, lopputulos silti tosi
1==2 or 2==3 # Kummatkaan eivät ole tosia, lopputulos epätosi
1==1
not 1==1
"""
Explanation: Kokeile mitä tapahtuu jos kirjoitat Pythonille pelkän True tai False. Entäs pystytkö sijoittamaan totuusarvon muuttujaan?
Mikä on totuusarvon tyyppi? Tarkista se type(...)-funktiolla, kuten teimme kokonaisluvuille ja merkkijonoille:
Huomaa, että True ja False on kirjoitettu suurella alkukirjaimella, ja että ne eivät ole ympäröity lainausmerkein, eli ne eivät ole merkkijonoja. True ja False ovatkin Pythonin varattuja sanoja, eli kieleen sisäänrakennettuja asioita.
and / or / not
Myös and, or ja not ovat Pythonissa varattuja sanoja. Niillä voi muuttaa totuusarvoja, sekä yhdistää niitä tuottavia vertailuoperaatioita toisiinsa.
Alla risuaidan # jälkeen kirjoitettua ei tulkita Python-kieleksi. Ne ovat koodin sekaan kirjoitettuja kommentteja, mikä onkin usein tarpeen, jotta koodin tarkoitusta ei tarvitse turhaan kummastella. Kommentit auttavat koodaajaa - ja mikä tärkeämpää - muita koodaajia ymmärtämään mitä koodi tekee.
End of explanation
"""
"test" == "Test"
"""
Explanation: Tehtävä:
Kokeile sijoittaa muutama totuusarvo muuttujiin (esim. a ja b) ja kokeile itse and, or ja not operaattoreita.
XOR: Osaatko tehdä vertailun, joka palauttaa True vain jos jompikumpi vertailtavasta on tosi (ns. exclusive-or, xor)? Vinkki: käytä muuttujia ja ryhmittele vertailuoperaattoreita suluilla. Esim:
```python
not (True and (True or False))
```
Merkkijonojen vertailu: Kokeile vielä verrata merkkijonoja keskenään. Kokeile onko suurilla ja pienillä kirjaimilla merkitystä. Yllätyitkö?
End of explanation
"""
for numero in [1,2,3,4,5]:
print("Hei", numero)
"""
Explanation: 7. Logiikka
Tähän mennessä olemme käyttäneet Pythonia lähinnä moneen hienoon temppuun taipuvana laskukoneena. Jotta pystyisimme kirjoittamaan tietokoneelle pidempiä ohjeita, tulee meidän tutustua keinoihin joilla ohjeiden logiikka kuvataan Pythonin kaltaisissa ohjelmointikielissä. Tähän on käytettävissä kaksi perustekniikkaa: ehdot ja silmukat.
7.1. Ehtolause
if -lause, eli ehtolause, on tapa pistää tietokone päättämään mitä sen tulisi koodia tulkitessaan seuraavaksi tehdä.
Alla on muutamia esimerkkejä, joita saattaisit käyttää tosielämän päätöksentekoon:
"Jos olet nälkäinen, mennään lounaalle."
"Jos roskis on täynnä, mene ja tyhjää se."
Alla esimerkki siitä, miten samankaltainen asia ilmaistaisiin Python-koodissa:
```python
nimi = "Jussi"
if nimi == "Jussi":
····print("Moi Jussi!")
```
`Moi Jussi!`
Huomattavaa: Pythonissa ehtolauseen jälkeen tulee kaksoispiste, joka puolestaan kertoo että seuraavalta riviltä alkaa uusi lohko. Lohko on pätkä ohjelmakoodia, joka kuuluu yhteen, ja jonka tietokone suorittaa kokonaisuutena rivi kerrallaan (ylhäältä alas). Lohko on sisennetty neljällä välilyönnillä ja kaikki samaan tasoon sisennetyt peräkkäiset rivit kuuluvat samaan lohkoon (yllä olevassa esimerkissä pisteet tarkoittavat välilyöntejä, ja ovat siinä näkyvillä vain siksi, että välilyöntien käyttö tulisi selvemmäksi).
Mutta entä jos henkilö ei olekaan Jussi? Pythonissa on varattu sana else näitä tilanteita varten:
```python
if name=="Jussi":
····print("Moi Jussi!")
else:
····print("Hujari!")
```
Jos taas henkilöitä on enemmän, voidaan käyttää sanaa elif tämän toisen ehdon tarkastamiseen:
```python
if name=="Jussi":
····print("Moi Jussi!")
elif name=="Aki":
····print("Hei Aki!")
else:
····print("Kuka sinä olet?")
```
Huomattavaa:
* elif -ehto tarkastetaan vain jos mikään ennen sitä olevista ehdoista ei toteutunut.
* else -lohko suoritetaan vain jos mikään aiempi ehto ei toteutunut.
Toisinsanoen: if/elif/else rakenteesta suoritetaan aina vain yksi haara (lohko).
Tehtävä:
Kirjoita koodinpätkä, joka tulostaa ruudulle "Yeah!" jos muuttuja nimeltä color on "yellow":
Lisää ehtoon vielä elif ja else -haarat, joissa tutkit onko muuttujassa jokin toinen väri ja jos on, tulostat jotain muuta nokkelaa.
7.2. Silmukat
Toinen perusrakenne ohjelmointikielissä on silmukat. Sitä käytetään kun halutaan, että tietokone toistaa jokin asia monta kertaa. Silmukoita on kahdenlaisia:
* Laskevat silmukat, jotka toistetaan tietyn monta kertaa
* Ehdolliset silmukat, joita toistetaan kunnes jokin ehto täyttyy
Laskeva silmukka
Laskevat silmukat kirjoitetaan pythonissa käyttäen for-avainsanaa ja siksi niitä sanotaankin for-silmukoiksi. Silmukka vaatii myös nimetyn laskumuuttujan (esimerkissä numero), in-avainsanan ja esim. listan numeroita, jotka silmukan laskumuuttuja saa järjestyksessä yksi toisensa jälkeen. Huomaa myös kaksoispiste rivin lopussa ja uuden lohkon merkiksi tehty sisennys:
End of explanation
"""
list( range(1,5) ) # voit for-silmukassa jättää list(...)-osan pois.
"""
Explanation: Tehtävä:
Kirjoita koodinpätkä, joka laskee kertoman (merkitään matematiikassa huutomerkillä !), eli kertoo peräkkäiset numerot keskenään. Esim.
5! = 5*4*3*2*1
Käytä muuttujaa, johon säilöt tuloksen. Pitkien lukulistojen kirjoittaminen on työlästä, joten kannattaa käyttää Pythoniin sisäänrakennettua range(a,b)-funktiota, joka tekee sinulle lukulistan a:sta b-1:een (huomaa, että yläraja b ei ole mukana). Havainnollistava esimerkki alla:
End of explanation
"""
lkm = 1
while (lkm<4):
print ('toistojen lukumäärä on', lkm)
lkm = lkm+1
"""
Explanation: Ehdollinen silmukka
Ehdollinen silmukka pyörii kunnes joku ehto täyttyy (tai jää täyttymättä). Pythonissa tämä on toteutettavissa while(...): rakenteella, missä sulkujen sisään tulee tutkittava ehto ja alle toistettava koodilohko.
End of explanation
"""
def moikkaa_mua(): # tämä on aliohjelman esittely
print("moi")
moikkaa_mua() # tämä on aliohjelmakutsu
"""
Explanation: Huomattavaa: on tärkeää, että ehto jää täyttymättä edes joskus, sillä muuten kyseessä on päättymätön silmukka. Niillekin on paikkansa, mutta yleensä ohjelma menee ns. jumiin, jos se jää pyörimään tällaiseen.
Tehtävä:
Käy edellä olevaa esimerkkiä rivi riviltä päässäsi läpi suorittaen sitä tietokoneen lailla askel kerrallaan. Huomioi missä vaihessa muuttujan arvoa kasvatetaan ja missä vaiheessa ehdon tarkastus tehdään.
9. Aliohjelmat
Ajattele keksirepsetiä:
Kumpi on helpompaa, noudattaa reseptiä, vai kysyä äidiltä, että "leipoisitko keksejä"? Ohjelmoinnissa tällainen pyyntö vertautuu aliohjelmakutsuun, sillä aliohjelma on joukko yhteen liittyviä ohjeita paketoituna nimetyksi kokonaisuudeksi.
Tämä on kätevää, sillä vaikka reseptin seuraaminen meiltä onnistuisikin, on pyytäminen huomattavasti helpompaa. Ja asioiden tekeminen helpommin, tehokkaammin ja nopeammin on pitkälti se, mistä ohjelmoinnissa on kyse.
Aliohjelmien syntaksi (eli kirjoitustapa)
Pythoniksi muutettuna tämä kaunis pyyntö saada tuoreita pikkuleipiä voitaisiin ilmaista vaikka näin:
!Python
pikkuleivat = leivo_pikkuleipia(montako_tarvitaan)
Tässä leivo_pikkuleipia on aliohjelman nimi ja montako_tarvitaan on parametri, joka määrää sen kuinka suurelle joukolle (eli kuinka montakertaisen pikkuleipätaikinan) äiti leipoo.
Pythonissa aliohjelmat esitellään def-avainsanaa käyttäen, jonka jälkeen annetaan sille nimi ja sulut. Esittelyrivi päättyy kaksoispisteeseen, ja seuraavalla rivillä on aliohjelman toteutuksen määrittelevä koodilohko, eli aliohjelman runko.
Aliohjelmaa kutsutaan kirjoittamalla sen nimi ja sulut:
End of explanation
"""
def moikkaa_kaveria(nimelta): #esittely
print("moi", nimelta)
moikkaa_kaveria("Jussi")
"""
Explanation: Huomattavaa: esittelyn ja kutsumisen eron ymmärtäminen on hyvin tärkeää. Siis kertauksena: esittelyssä kerrotaan yksityiskohtaisesti miten joku asia tehdään ja kutsuttaessa pyydetään tekemään kyseinen asia!
Aliohjelmalle voidaan lisätä parametreja kirjoittamalla ne sulkujen väliin. Näitä parametrimuuttujia voidaan sitten käyttää aliohjelman lohkon koodissa kuten mitä tahansa muuttujaa. Huomaa, että jos aliohjelmalle on lisätty parametri, tällöin myös aliohjelman kutsussa pitää antaa kyiseiselle parametrimuuttujalle arvo. (suorita alla oleva koodi)
End of explanation
"""
1+2
"""
Explanation: Tehtävä:
Kutsu edellä esiteltyä moikkaa_kaveria-aliohjelmaa niin, että se moikkaa sinua. Esittele sitten uusi aliohjelma, joka ottaa kaksi parametria (parametrit erotetaan toisistaan pilkulla): tervehdyksen ja listan nimiä ja moikkaa kaikkia listassa olevia henkilöitä (kts. for-silmukan ja listan ohjeet yltä, jos et muista miten niitä käytettiin).
End of explanation
"""
def tuplaa(numero): #esittely
return numero*2
tuplaa(4) #kutsu
"""
Explanation: Aliohjelmien paluuarvot
Käytämme edelleen pikkuleipiä leipovaa äitiä apuna:
!Python
pikkuleivat = leivo_pikkuleipia(montako_tarvitaan)
Rivin alkuun on nyt lisätty pikkuleivat-muuttuja ja yhtäsuuruusmerkki. Tähän muuttujaan puolestaan sijoitetaan aliohjelman paluuarvo, eli valmistuneet pikkuleivät.
Tässä yhteydessä toinen hyödyllinen tapa ajatella aliohjelmia on tehdasvertaus: Aliohjelma on kuin tehdas, minne menee raaka-aineita sisään (parametrit) ja ulos tulee valmis tuote (paluuarvo). Emme nimittäin ole kiinnostuneita siitä miten tehdas toimii, kunhan se vain tekee sen mitä lupaa.
Aliohjelman esittelyn yhteydessä, sen koodilohkossa, käyttämällä return-avainsanaa, voidaan lopettaa aliohjelma samalla palauttaen jokin arvo. Esimerkiksi näin (suorita alla oleva koodi):
End of explanation
"""
def sano_kohteliaisuus():
print("Kirjoita nimesi:")
nimi = input()
print("Kirjoita lempinumerosi:")
    numero = int(input())
    print("Hei", nimi + ",", numero, "on sitten hyvä numero.")
sano_kohteliaisuus()
"""
Explanation: Tehtävä:
Tuplaa 12 käyttäen tuplaa-aliohjelmaa.
Tai, mitä tapahtuu jos tuplaat oman nimesi? Kokeile.
Yhteenveto aliohjelmista
Opimme paljon Pythonin aliohjelmista, mutta kerrataan vielä:
Aliohjelmat esitellään def-avainsanalla
Aliohjelmia kutsutaan kirjoittamalla sen nimi ja heti nimen perään sulut: kutsun_nimi()
Aliohjelmakutsun sulkujen väliin kirjoitetaan mahdolliset parametrien arvot pilkuilla eroteltuna
Jos aliohjelma palauttaa paluuarvon, sen voi ottaa talteen muuttujaan yhtäsuuruusmerkillä (sijoitus)
Aliohjelman runkolohkossa voi lopettaa aliohjelman suorituksen ja palauttaa paluuarvon return-avainsanalla.
10. Syöte
Syöte on tietoa, jota syötämme ohjelmalle. Tämä tieto voidaan sitten vaikkapa välittää aliohjelmalle tai näyttää se ruuudulla.
Pythonissa on sisäänrakennettu aliohjelma nimeltä input(), joka kysyy käyttäjältä jotain. Esimerkki alla (suorita se ja vastaa kysymyksiin):
End of explanation
"""
import os
for tiedosto in os.listdir( os.getcwd() ):
print(tiedosto)
import urllib.request
py_home = urllib.request.urlopen("http://www.python.org")
print(py_home.read()) #Tulostaa pythonin kotisivun HTML-muodossa.
"""
Explanation: Tehtävä:
Kirjoita koodi, joka kysyy kaksi numeroa ja laskee ne yhteen.
11. Modulit
"Pyörää ei kannata keksiä uudelleen" on sanonta, joka pätee erityisen hyvin koodauksessa. Suuri osa yksinkertaisista koodaustehtävistä on jo tehty, ja usein nopein tapa saada asioita aikaan on uudelleenkäyttää jonkun toisen kirjoittamaa ohjelmakoodia.
Pythonin tarjoama ratkaisu tähän on modulit. Voit ajatella modulia vaikka kokoelmana yhteen littyviä aliohjelmia, jotka tekevän jonkin tehtävän tekemisestä helppoa. Ne ovat tavallaan ohjelmoinnin leegopalikoita. Pythonin mukana tulee koko joukko moduleita, tutustutaan alla niistä muutamaan:
Satunnaisen numeron arpominen väliltä 1-100:
```python
import random
print( random.randint(1,100) )
```
Kellonajan tarkistaminen:
```python
from datetime import datetime
print( datetime.now() )
```
* `from X import Y` tarkoittaa tässä sitä, että haemme `datetime`-modulista vain `datetime`-nimisen "palikan".
Kalenterin tulostaminen:
```python
import calendar
calendar.prmonth(2015, 9)
```
Huomattavaa: moduleiden sisällä asustavia aliohjelmia ja muita "juttuja" kutsutaan pistenotaatiolla, eli niin, että ensin tulee modulin nimi, sitten piste, sitten kutsuttavan aliohjelman (tai muun "jutun") nimi, ja aliohjelman ollessa kyseessä sulut ja niiden väissä parametrit.
Tehtävä:
Kokeile itse käyttää esimerkkien moduleita kirjoittamalla niiden koodi alle (muistathan, että ">>>" EI ole osa kopioitavaa koodia):
Alla esitellään esimerkkien avulla vielä pari mielenkiintoista modulia. Sisäänrakennettuja moduleita voit selata Pythonin dokumentaation modulilistasta. Lisäksi netti on puolellaan kolmannen osapuolen moduleita, tarpeeseen jos toiseenkin.
End of explanation
"""
import turtle
turtle.reset()
#Kirjoita kilpikonnan ohjauskoodi tähän
turtle.fd(100)
turtle.Screen().mainloop()
"""
Explanation: Tehtävä:
Hyödynnä äsken ja aiemmin oppimaasi ja kirjoita alle peli, joka arpoo satunnaisen numeron (random), ja pyytää sinua arvaamaan sen (print, input) kunnes (while) osut oikeaan. Voit rullailla ylöspäin tätä Notebookia, jos et muista jotain yksityiskohtaa. Jos peli tuntuu liian vaikealta, voit laittaa ohjelman tulostamaan vinkkejä, kuten "luku on pienempi" tai "polttaa".
12. Kilpikonnagrafiikkaa
Pythonissa on eräs mielenkiintoinen moduli nimeltä turtle, jolla leikimme seuraavaksi. Kyseessä on omassa ikkunassaan asuva kilpikonna, jota ohjaat kirjoittamalla koodia.
Valitettavasti Pythonin sisäänrakennettu kilpikonna ei toimi verkossa olevassa Jupyter-muistilehtiössä (esim. Azure Notebooks), vaan tähän tehtävään tarvitset paikallisen Python-asennuksen. Suositeltavaa on käyttää Anaconda-jakelua. Asennuspaketti eri alustoille (Windows/OSX/Linux) on ladattavissa oheisen linkin takaa.
Kun olet asentanut Anacondan ja avannut tämän ipynb-tiedoston siellä, meillä on toimiva kilpikonna. Pistetäänhän se liikkeelle!
End of explanation
"""
import turtle
t = turtle.Turtle()
def painettu_ylos():
t.setheading(90)
t.forward(20)
def painettu_alas():
t.setheading(-90)
t.forward(20)
def painettu_vasemmalle():
t.setheading(180)
t.forward(20)
def painettu_oikealle():
t.setheading(0)
t.forward(20)
# Kiinnitetään näppäimenpainallukset edellä oleviin aliohjelmiin
# kun näppäintä painetaan, aliohjelmaa kutsutaan.
screen = turtle.Screen()
screen.onkey(painettu_ylos, "w")
screen.onkey(painettu_vasemmalle, "a")
screen.onkey(painettu_alas, "s")
screen.onkey(painettu_oikealle, "d")
screen.listen()
screen.mainloop()
"""
Explanation: Jos saat virheilmoituksen, jokin meni pieleen, eikä Jypyter-ympäristö asentunut tai käynnistynyt koneellasi oikein. Tarkista, että olet käynnistänyt Jupyter Notebook -palvelimen käynnistysvalikosta ja että selain on aukaissut seuraavan sivun: http://127.0.0.1:8888
Kun ajat kodin kilpikonna vain kököttää paikallaan. Pistetään siihen vähän liikettä. ENSIN KUITENKIN SULJE AUENNUT KILPIKONNAIKKUNA RUKSISTA. JOS ET TEE TÄTÄ, ET NÄE MUUTTUNEEN KOODIN VAIKUTUKSIA.
Alla joitain "temppuja", mitä kilpikonna osaa:
python
turtle.forward(10) # kilppari kulkee eteenpäin annetun määrän askelia
turtle.right(45) # kilppari kääntyy annetun määrän asteita oikealle
turtle.left(90) # kilppari kääntyy annetun määrän asteita oikealle
Tehtävä:
Käytä näitä kolmea käskyä ja silmukoita tuottaaksesi erilaisia toinen toistaan villimpiä kuvioita. Voit kirjoittaa koodin yllä olevaan runkoon. Huom: Komennot ovat myös lyhennettävissä fd, rt, lt jne.
Jos jäit koukkuun, Pythonin turtle-modulin dokumentaatiosta voit lukea mitä kaikkia temppuja Python-kilppari osaa. Voit mm. vaihtaa ikkunan taustaväriä, kilpikonnan muotoa, viivan paksuutta ja väriä jne.
Lisätehtävä:
Dokumentaatiota lukemalla paljastuu, että kilpikonnaikkunan kautta voi lukea hiiren ja näppäimistön painalluksia. Tämä mahdollistaa jo vaikka mitä! Kokeile mitä alla oleva koodi tekee ja jatkokehitä koodia vaikka peliksi asti!
End of explanation
"""
|
DOV-Vlaanderen/pydov | docs/notebooks/customizing_object_types.ipynb | mit | %matplotlib inline
"""
Explanation: Examples of object type customization
Listing techniques per CPT measurement
End of explanation
"""
from pydov.types.fields import XmlField, XsdType
from pydov.types.abstract import AbstractDovSubType
from pydov.types.sondering import Sondering
"""
Explanation: While performing CPT measurements, different techniques can be used. Since these can have an impact on the results, it can be interesting to download this additional information in order to better comprehend the CPT data.
Different CPT techniques can be applied at various depths, so the easiest way to add this to pydov is to use a new Sondering subtype Techniek, as shown below. The result will be that one can then choose to query CPT measurements and either retrieve a dataframe with the measurements themselves, or a dataframe with the techniques applied. The user can subsequently compare or merge the two dataframes at will.
End of explanation
"""
class Techniek(AbstractDovSubType):
rootpath = './/sondering/sondeonderzoek/penetratietest/technieken'
fields = [
XmlField(name='techniek_diepte_van',
source_xpath='/diepte_van',
definition='Enkel van toepassing voor het plaatsen van voerbuizen - '
'(code V) of het boren door een harde laag (code B).',
datatype='float'),
XmlField(name='techniek_diepte',
source_xpath='/diepte_techniek',
definition='Diepte waarop techniek toegepast werd.',
datatype='float'),
XmlField(name='techniek',
source_xpath='/techniek',
definition='De gebruikte techniek.',
datatype='string',
xsd_type=XsdType(
xsd_schema='https://www.dov.vlaanderen.be/xdov/schema/latest/xsd/kern/sondering/SonderingDataCodes.xsd',
typename='SondeerTechniekEnumType')),
XmlField(name='techniek_andere',
source_xpath='/techniek_andere',
definition="De gebruikte techniek (enkel van toepassing indien de techniek = 'andere').",
datatype='string')
]
"""
Explanation: A new subtype has to be a subclass of the AbstractDovSubType class and implement two class variables: rootpath and fields.
The rootpath is the XML XPath expression matching all instances of this subtype in the XML. One instance of this subtype will be created for each element matched by the rootpath XPath expression.
In the fields all the fields of this subtype are listed. These are instances of XmlField and should have at minimum a name, a source_xpath and a datatype. Additionally, a field should have a definition and can have a reference to an XSD schema type. The latter will be resolved and parsed at runtime, resulting in a list of values of this field.
End of explanation
"""
class SonderingTechnieken(Sondering):
subtypes = [Techniek]
"""
Explanation: In order to be able to use this subtype in a search query, we have to create a subclass of the original main type (Sondering) and register our new subtype:
End of explanation
"""
from pydov.search.sondering import SonderingSearch
cpts = SonderingSearch(objecttype=SonderingTechnieken)
"""
Explanation: The next step is to instantiate the SonderingSearch class with our newly created type:
End of explanation
"""
cpts.get_fields()['techniek']
"""
Explanation: If everything worked out, you should be able to see the new fields in the get_fields output:
End of explanation
"""
from pydov.util.location import WithinDistance, Point
df = cpts.search(location=WithinDistance(Point(150000, 150000), 10000, 'meter'))
"""
Explanation: Querying is exactly the same as with the default Sondering type:
End of explanation
"""
df['techniek_label'] = df['techniek'].map(cpts.get_fields()['techniek']['values'])
df.head()
"""
Explanation: One can use the values from the XSD type to add a human-readably column with the different techniques:
End of explanation
"""
from pydov.types.fields import XmlField, XsdType
from pydov.types.boring import Boring
class BoringMethodeXyz(Boring):
__generiekeDataCodes = 'https://www.dov.vlaanderen.be/xdov/schema/latest/xsd/kern/generiek/GeneriekeDataCodes.xsd'
fields = Boring.extend_fields([
XmlField(name='methode_xy',
source_xpath='/boring/xy/methode_opmeten',
definition='Methode waarop de x en y-coordinaat opgemeten werden.',
datatype='string',
xsd_type=XsdType(
xsd_schema=__generiekeDataCodes,
typename='MethodeOpmetenXyEnumType')),
XmlField(name='betrouwbaarheid_xy',
source_xpath='/boring/xy/betrouwbaarheid',
definition='Betrouwbaarheid van het opmeten van de x en y-coordinaat.',
datatype='string',
xsd_type=XsdType(
xsd_schema=__generiekeDataCodes,
typename='BetrouwbaarheidXyzEnumType')),
XmlField(name='methode_mv',
source_xpath='/boring/oorspronkelijk_maaiveld/methode_opmeten',
definition='Methode waarop de Z-coördinaat van het maaiveld opgemeten werd.',
datatype='string',
xsd_type=XsdType(
xsd_schema=__generiekeDataCodes,
typename='MethodeOpmetenZEnumType')),
XmlField(name='betrouwbaarheid_mv',
source_xpath='/boring/oorspronkelijk_maaiveld/betrouwbaarheid',
definition='Betrouwbaarheid van het opmeten van de z-coordinaat van het maaiveld.',
datatype='string',
xsd_type=XsdType(
xsd_schema=__generiekeDataCodes,
typename='BetrouwbaarheidXyzEnumType')),
XmlField(name='aanvangspeil_mtaw',
source_xpath='/boring/aanvangspeil/waarde',
definition='Hoogte in mTAW van het startpunt van de boring (boortafel, bouwput etc).',
datatype='float'),
XmlField(name='methode_aanvangspeil',
source_xpath='/boring/aanvangspeil/methode_opmeten',
definition='Methode waarop de Z-coördinaat van het aanvangspeil opgemeten werd.',
datatype='string',
xsd_type=XsdType(
xsd_schema=__generiekeDataCodes,
typename='MethodeOpmetenZEnumType')),
XmlField(name='betrouwbaarheid_aanvangspeil',
source_xpath='/boring/aanvangspeil/betrouwbaarheid',
definition='Betrouwbaarheid van het opmeten van de z-coordinaat van het aanvangspeil.',
datatype='string',
xsd_type=XsdType(
xsd_schema=__generiekeDataCodes,
                     typename='BetrouwbaarheidXyzEnumType')),
])
"""
Explanation: Adding location and height details to Boring dataframe
There is more to the location of a borehole than meets the eye! The default dataframe lists already multiple fields regarding the location of the borehole, both planimetric as altimetric:
x and y are the planimetric coordinates of the borehole
start_boring_mtaw is the height of the start (aanvangspeil) of the borehole
mv_mtaw is the height of the ground level at the time of the making of the borehole
However, we have more information available regarding the (origin of) these coordinates. Each of them has an associated method (methode) and reliability (betrouwbaarheid).
We also make the distinction between the height of the ground level (maaiveld) and the height of the start of the borehole (aanvangspeil). If the borehole was started at ground level both are the same, but this is not necessarily the case. Furthermore the height of the start of the borehole can be either absolute (measured individually) or relative to the ground level.
If we want to have all this extra information available when retrieving the borehole dataframe output (or that of another DOV type), we can add the extra XML fields in a subclass of the Boring type:
End of explanation
"""
from pydov.search.boring import BoringSearch
bs = BoringSearch(objecttype=BoringMethodeXyz)
bs.get_fields()['mv_mtaw']
"""
Explanation: When instantiating our BoringSearch object, we now explicitly set our new type as objecttype to search:
End of explanation
"""
from pydov.util.location import WithinDistance, Point
df = bs.search(location=WithinDistance(Point(150000, 150000), 10000, 'meter'),
return_fields=('pkey_boring', 'boornummer', 'x', 'y', 'methode_xy', 'betrouwbaarheid_xy',
'mv_mtaw', 'methode_mv', 'betrouwbaarheid_mv', 'aanvangspeil_mtaw',
'methode_aanvangspeil', 'betrouwbaarheid_aanvangspeil', 'start_boring_mtaw'))
df.head()
df_plot = df.groupby('methode_xy').size().sort_values()
ax = df_plot.plot.barh()
ax.set_xlabel("count");
ax.set_title("Number of boreholes per coordinate digitization method");
df_plot = df.groupby('methode_mv').size().sort_values()
ax = df_plot.plot.barh()
ax.set_xlabel("count");
ax.set_title("Number of boreholes per height digitization method");
df_plot = df.groupby('methode_aanvangspeil').size().sort_values()
ax = df_plot.plot.barh()
ax.set_xlabel("count");
ax.set_title("Number of boreholes per `aanvangspeil` digitization method");
"""
Explanation: Searching for boreholes remains exactly the same, but will reveal the extra information in the output dataframe:
End of explanation
"""
from pydov.types.fields import XmlField, XsdType
from pydov.types.abstract import AbstractDovSubType
from pydov.types.grondwaterfilter import GrondwaterFilter
class Gxg(AbstractDovSubType):
rootpath = './/filtermeting/gxg'
fields = [
XmlField(name='gxg_jaar',
source_xpath='/jaar',
definition='jaar (hydrologisch jaar voor lg3 en hg3, kalenderjaar voor vg3)',
datatype='integer'),
XmlField(name='gxg_hg3',
source_xpath='/hg3',
definition='gemiddelde van de drie hoogste grondwaterstanden in een hydrologisch '
'jaar (1 april t/m 31 maart) bij een meetfrequentie van tweemaal per maand',
datatype='float'),
XmlField(name='gxg_lg3',
source_xpath='/lg3',
definition='gemiddelde van de drie laagste grondwaterstanden in een hydrologisch jaar '
'(1 april t/m 31 maart) bij een meetfrequentie van tweemaal per maand',
datatype='float'),
XmlField(name='gxg_vg3',
source_xpath='/vg3',
definition='gemiddelde van de grondwaterstanden op 14 maart, 28 maart en 14 april in '
'een bepaald kalenderjaar',
datatype='float')
]
class GrondwaterFilterGxg(GrondwaterFilter):
subtypes = [Gxg]
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
from owslib.fes import PropertyIsEqualTo
fs = GrondwaterFilterSearch(objecttype=GrondwaterFilterGxg)
fs.get_fields()['gxg_vg3']
df = fs.search(
query=PropertyIsEqualTo('pkey_filter', 'https://www.dov.vlaanderen.be/data/filter/1999-009146')
)
df.head()
"""
Explanation: Listing GxG for GrondwaterFilters
For some of our GrondwaterFilters, precalculated groundwater level statistics (GxG) are available alongside the individual measurements (peilmetingen) themselves. These statistics give information about the average high, low and spring groundwater levels at that location.
They can be obtained by defining a new subtype for the GrondwaterFilter type:
End of explanation
"""
from pydov.types.abstract import AbstractDovSubType
from pydov.types.boring import Boring
class Kleur(AbstractDovSubType):
rootpath = './/boring/details/kleur'
fields = [
XmlField(name='diepte_kleur_van',
source_xpath='/van',
definition='Bovenkant van de laag met een bepaalde bekisting, in meter.',
datatype='float'),
XmlField(name='diepte_kleur_tot',
source_xpath='/tot',
definition='Onderkant van de laag met een bepaalde bekisting, in meter.',
datatype='float'),
XmlField(name='kleur',
source_xpath='/kleur',
definition='Grondkleur voor het diepte-interval',
datatype='string')
]
class BoringKleur(Boring):
subtypes = [Kleur]
from pydov.search.boring import BoringSearch
from owslib.fes import PropertyIsEqualTo
bs = BoringSearch(objecttype=BoringKleur)
bs.get_fields()['kleur']
df = bs.search(
query=PropertyIsEqualTo('pkey_boring', 'https://www.dov.vlaanderen.be/data/boring/2004-103984')
)
df.head()
"""
Explanation: Adding color to the Borehole dataframe
The default dataframe of the Boring datatype contains information about the 'boormethode' as its subtype. However, in the DOV database information about the observed color of the soil in the various depth intervals is available as well. We can access this data in pydov by defining a new subtype 'Kleur':
End of explanation
"""
from pydov.types.fields import XmlField
from owslib.fes import PropertyIsEqualTo
from pydov.search.grondmonster import GrondmonsterSearch
from pydov.types.grondmonster import Grondmonster
class GlauconietWaarden(Grondmonster):
fields = Grondmonster.extend_fields([
XmlField(name='glauconiet_gt500',
source_xpath='/grondmonster/observatieData/observatie['
'parameter="GLAUCONIET_GT500"]/'
'waarde_numeriek',
definition='Glauconiet fractie groter 500 micron (%)',
datatype='float'),
XmlField(name='glauconiet_tss',
source_xpath='/grondmonster/observatieData/observatie['
'parameter="GLAUCONIET_TSS"]/'
'waarde_numeriek',
definition='Glauconiet fractie kleiner 500micron en groter 63micron (%)',
datatype='float'),
XmlField(name='glauconiet_kl63',
source_xpath='/grondmonster/observatieData/observatie['
'parameter="GLAUCONIET_KL63"]/'
'waarde_numeriek',
definition='Glauconiet fractie kleiner 63micron (%)',
datatype='float')
])
gm = GrondmonsterSearch(objecttype=GlauconietWaarden)
df = gm.search(
query=PropertyIsEqualTo(
propertyname='pkey_grondmonster', literal='https://www.dov.vlaanderen.be/data/grondmonster/2017-172111')
)
df.head()
"""
Explanation: List all glauconite fields for Grondmonsters
In the default Grondmonster dataframe, only the total glauconite is available. Using the following example, you can however request all the different glauconite fields available in DOV: 'glauconiet_totaal', 'glauconiet_KL63', 'glauconiet_TSS' and 'glauconiet_GT500'.
End of explanation
"""
|
backmari/moose | modules/level_set/tests/verification/1d_level_set_mms/LevelsetMMS.ipynb | lgpl-2.1 | %matplotlib inline
import glob
from sympy import *
import numpy
import matplotlib.pyplot as plt
import pandas
init_printing()
"""
Explanation: Transient MMS Verification for Levelset Equation
Computes the forcing function for a transient MMS test; the selected solution is designed to reach steady state rapidly.
Load the necessary python libraries.
End of explanation
"""
x,t,a,b= symbols('x t a b')
u = 1+a*exp(1/(10*t))*sin(2*pi/b*x)
u
"""
Explanation: Define the Manufactured solution
Define the assumed (exact) solution as a function of x and t that converges rapidly in time.
End of explanation
"""
f = diff(u, t) + diff(u, x)
f
"""
Explanation: Compute the forcing function.
End of explanation
"""
str(u).replace('**', '^')
str(f).replace('**', '^')
"""
Explanation: Build a string of the exact and forcing function to be copied to the input file (levelset_mms.i).
The only syntax that needs to change to make this string work with MOOSE ParsedFunction is the exponent operator.
End of explanation
"""
filenames = glob.glob('level_set_mms_0*.csv')
print(filenames)
results = []
for fname in filenames:
results.append(pandas.DataFrame(pandas.read_csv(fname, index_col='time')))
"""
Explanation: Demonstrate how the solution reaches steady state.
Verification of Computed Solution
Comparison of the exact and computed solutions at a point.
Read the results files.
End of explanation
"""
times = results[-1]['point'].keys()
pfunc = Lambda(t, u.subs([(x, 0.1), (a, 1), (b, 8)]))
exact = pandas.Series([pfunc(i).evalf() for i in times], index=times)
"""
Explanation: Compute the exact solution at a point (0.1).
End of explanation
"""
fig = plt.figure(figsize=(18,9))
axes = fig.add_subplot(111)
axes.plot(exact.keys(), exact.values, '-k', linewidth=3, label='exact') # pandas.Series plot method not working
for i in range(len(results)):
x = results[i]['point'].keys()
y = results[i]['point'].values
axes.plot(x, y, label='Level ' + str(i))
plt.legend(loc='lower left')
"""
Explanation: Show the computed results together with the exact solution.
End of explanation
"""
n = len(results)
error = numpy.zeros(n)
h = numpy.zeros(n)
for i in range(n):
error[i] = results[i]['error'].iloc[-1]
h[i] = 1./results[i]['h'].iloc[-1]
"""
Explanation: Convergence Plot
Extract the L2 error and element size (1/h) at each refinement level.
End of explanation
"""
coefficients = numpy.polyfit(numpy.log10(h), numpy.log10(error), 1)
coefficients
fig = plt.figure(figsize=(18,9))
axes = fig.add_subplot(111)
axes.plot(h, error, 'sk')
axes.set(xscale='log', yscale='log', xlabel='1/h', ylabel='L2 Error',)
polynomial = numpy.poly1d(coefficients)
axes.plot(h, pow(10, polynomial(numpy.log10(h))))
axes.grid(True, which='both')
plt.text(h[0], error[-1], 'Slope: ' + str(coefficients[0]), fontsize=14)
"""
Explanation: Fit line to data.
End of explanation
"""
|
planet-os/notebooks | api-examples/gefs-api.ipynb | mit | %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import dateutil.parser
import datetime
from urllib.request import urlopen, Request
import simplejson as json
import pandas as pd
def extract_reference_time(API_data_loc):
"""Find reference time that corresponds to most complete forecast. Should be the earliest value."""
reftimes = set()
for i in API_data_loc['entries']:
reftimes.update([i['axes']['reftime']])
reftimes=list(reftimes)
if len(reftimes)>1:
reftime = reftimes[0] if dateutil.parser.parse(reftimes[0])<dateutil.parser.parse(reftimes[1]) else reftimes[1]
else:
reftime = reftimes[0]
return reftime
#latitude = 21.205
#longitude = -158.35
latitude = 58
longitude = 26
apikey = open('APIKEY').read().strip()
num_ens = 10
prec_var = "Total_precipitation_surface_6_Hour_Accumulation_ens"
pres_var = "Pressure_surface_ens"
"""
Explanation: Planet OS API demo for GEFS
<font color=red>This notebook is not working right now, as the GEFS ensemble forecast is updated only on request! Let us know if you would like to use it.</font>
Note: this notebook requires python3.
This notebook is an introduction to the PlanetOS API data format using the GFS Global Forecast dataset.
API documentation is available at http://docs.planetos.com.
If you have questions or comments, join the Planet OS Slack community to chat with our development team.
For general information on usage of IPython/Jupyter and Matplotlib, please refer to their corresponding documentation. https://ipython.org/ and http://matplotlib.org/
GEFS global probabilistic weather forecast
GEFS is a probabilistic weather forecast system composed of 20 model ensemble members, which differ by small fluctuations in the model initial conditions. Probabilistic forecasts try to mimic the naturally chaotic behaviour of the atmosphere and usually have higher forecast skill than a deterministic weather forecast after the third day or so. However, their interpretation is not trivial, and with this demo we encourage users to take a deeper look into this kind of data.
In this tutorial we analyse precipitation and surface pressure data.
End of explanation
"""
API_meta_url = "http://api.planetos.com/v1/datasets/noaa-ncep_gefs?apikey={}".format(apikey)
request = Request(API_meta_url)
response = urlopen(request)
API_meta = json.loads(response.read())
print(API_meta_url)
"""
Explanation: GEFS is a model with lots of output variables, which may also change depending on which particular output file you are checking. Analyse the metadata first, filter for the variables we are interested in, and limit the API request.
Warning: if you request too many variables, you may get a gateway timeout error. If this happens, try to specify only one context or variable.
End of explanation
"""
[i['name'] for i in API_meta['Variables'] if 'pressure' in i['name'].lower() and 'surface' in i['name'].lower()]
"""
Explanation: Filter by parameter name; in this example we want to find the pressure at the surface.
End of explanation
"""
API_url = "http://api.planetos.com/v1/datasets/noaa-ncep_gefs/point?lon={0}&lat={1}&count=2000&verbose=false&apikey={2}&var={3}".format(longitude,latitude,apikey,prec_var)
request = Request(API_url)
response = urlopen(request)
API_data_prec = json.loads(response.read())
print(API_url)
"""
Explanation: API request for precipitation
End of explanation
"""
API_url = "http://api.planetos.com/v1/datasets/noaa-ncep_gefs/point?lon={0}&lat={1}&count=2000&verbose=false&apikey={2}&var={3}".format(longitude,latitude,apikey,pres_var)
request = Request(API_url)
response = urlopen(request)
API_data_pres = json.loads(response.read())
print(API_url)
"""
Explanation: API request for surface pressure
End of explanation
"""
## first collect data to dictionaries, then convert to Pandas DataFrame
pres_data_dict = {}
pres_time_dict = {}
prec_data_dict = {}
prec_time_dict = {}
for i in range(0, num_ens):
pres_data_dict[i] = []
pres_time_dict[i] = []
prec_data_dict[i] = []
prec_time_dict[i] = []
for i in API_data_pres['entries']:
reftime = extract_reference_time(API_data_pres)
if reftime == i['axes']['reftime']:
## print("reftest", int(i['axes']['ens']))
pres_data_dict[int(i['axes']['ens'])].append(i['data'][pres_var])
pres_time_dict[int(i['axes']['ens'])].append(dateutil.parser.parse(i['axes']['time']))
for i in API_data_prec['entries']:
reftime = extract_reference_time(API_data_prec)
if reftime == i['axes']['reftime']:
prec_data_dict[int(i['axes']['ens'])].append(i['data'][prec_var])
prec_time_dict[int(i['axes']['ens'])].append(dateutil.parser.parse(i['axes']['time']))
## check if time scales are equal?!
for i in range(2,num_ens):
##print(i, np.array(pres_time_dict[1]).shape, np.array(pres_time_dict[i]).shape)
if np.amax(np.array(pres_time_dict[1])-np.array(pres_time_dict[i])) != datetime.timedelta(0):
print('timeproblem',np.amax(np.array(pres_time_dict[1])-np.array(pres_time_dict[i])))
pres_pd = pd.DataFrame(pres_data_dict)
prec_pd = pd.DataFrame(prec_data_dict)
prec_pd
"""
Explanation: Read data from the JSON response and convert it to pandas DataFrames for easier plotting
End of explanation
"""
fig, (ax0, ax2) = plt.subplots(nrows=2,figsize=(20,12))
ax0.boxplot(prec_pd)
ax0.grid()
ax0.set_title("Simple ensemble distribution")
ax0.set_ylabel('Precipitation mm/6h')
ax2.boxplot(np.cumsum(prec_pd,axis=0))
ax2.grid()
ax2.set_title("Cumulative precipitation distribution")
ax2.set_ylabel('Precipitation mm/6h')
ax2.set_xlabel('Forecast steps (each is 6h)')
"""
Explanation: Precipitation plots
Let's first plot boxplots of the ensemble members, showing the 6h precipitation and the accumulated precipitation.
End of explanation
"""
fig=plt.figure(figsize=(20,10))
plt.boxplot(pres_pd)
plt.grid()
plt.title('Ensemble distribution')
plt.ylabel('Pressure Pa')
plt.xlabel('Forecast steps (each is 6h)')
fig=plt.figure(figsize=(20,10))
plt.plot(pres_pd)
plt.grid()
plt.ylabel('Pressure Pa')
plt.xlabel('Forecast steps (each is 6h)')
"""
Explanation: From the simple distribution it is immediately visible that ensemble members may have very different values at a particular time. The interpretation of this is highly dependent on the physical quantity: for precipitation it may reflect changes in the actual weather pattern or just small changes in the timing of the precipitation event. To get rid of the latter, we use the accumulated precipitation. From this plot it is more evident (it depends on the particular forecast, of course) that the variability is smaller. For longer forecasts it may be more reasonable to check only the 24h accumulated precipitation; a sketch of that aggregation is appended at the end of this notebook.
Surface pressure plots
Surface pressure variation is a better descriptor of the actual uncertainty than precipitation
End of explanation
"""
|
raschuetz/foundations-homework | 06/homework-6-schuetz.ipynb | mit | apikey = '34b41fe7b9db6c1bd5f8ea3492bca332'
coordinates = {'San Antonio': '29.4241,-98.4936', 'Miami': '25.7617,-80.1918', 'Central Park': '40.7829,-73.9654'}
import requests
url = 'https://api.forecast.io/forecast/' + apikey + '/' + coordinates['San Antonio']
response = requests.get(url)
data = response.json()
# #Is it in my time zone?
# #temp. Answer: dict
# print(type(data))
# #temp. Answer: ['offset', 'latitude', 'hourly', 'flags', 'minutely', 'longitude', 'timezone', 'daily', 'currently']
# print(data.keys())
# #temp. Answer: dict
# print(type(data['currently']))
# #temp. Answer: ['windSpeed', 'time', 'dewPoint', 'icon', 'temperature', 'apparentTemperature', 'precipProbability',
#'visibility', 'cloudCover', 'nearestStormDistance', 'pressure', 'windBearing', 'ozone', 'humidity', 'precipIntensity',
#'summary', 'nearestStormBearing']
# print(data['currently'].keys())
# #temp. It's in my time zone!
# print(data['currently']['time'])
#Oh, this would have been easier:
#temp. Answer: America/Chicago
print(data['timezone'])
"""
Explanation: You'll be using the Dark Sky Forecast API from Forecast.io, available at https://developer.forecast.io. It's a pretty simple API, but be sure to read the documentation!
1) Make a request from the Forecast.io API for where you were born (or lived, or want to visit!).
Tip: Once you've imported the JSON into a variable, check the timezone's name to make sure it seems like it got the right part of the world!
Tip 2: How is north vs. south and east vs. west latitude/longitude represented? Is it the normal North/South/East/West?
End of explanation
"""
print('The current wind speed is', data['currently']['windSpeed'], 'miles per hour.')
print('It feels', round(data['currently']['apparentTemperature'] - data['currently']['temperature'], 2), 'degrees Fahrenheit warmer than it actually is.')
"""
Explanation: 2) What's the current wind speed? How much warmer does it feel than it actually is?
End of explanation
"""
# #temp. Answer: dict
# print(type(data['daily']))
# #temp. Answer: ['summary', 'data', 'icon']
# print(data['daily'].keys())
# #temp. Answer: list
# print(type(data['daily']['data']))
# #temp. It's a list of dictionaries
# #this time means Wed, 08 Jun 2016 05:00:00 GMT, which is currently today
# print(data['daily']['data'][0])
# #this time means Thu, 09 Jun 2016 05:00:00 GMT
# print(data['daily']['data'][1])
# #temp. Answer: 8
# print(len(data['daily']['data']))
# #temp. Answer: ['windSpeed', 'time', 'sunsetTime', 'precipIntensityMaxTime', 'apparentTemperatureMax', 'windBearing',
# #'temperatureMinTime', 'precipIntensityMax', 'precipProbability', 'sunriseTime', 'temperatureMin',
# #'apparentTemperatureMaxTime', 'precipIntensity', 'apparentTemperatureMinTime', 'temperatureMax', 'dewPoint',
# #'temperatureMaxTime', 'icon', 'moonPhase', 'precipType', 'visibility', 'cloudCover', 'pressure',
# #'apparentTemperatureMin', 'ozone', 'humidity', 'summary']
# print(data['daily']['data'][0].keys())
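# Note (added): Dark Sky's moonPhase runs from 0 (new moon) through 0.5 (full moon)
# back to 1 (the next new moon), so folding the value around 0.5 below gives an
# approximate percentage of the lunar disc that is illuminated.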
today_moon = data['daily']['data'][0]['moonPhase']
print(100 * (1 - abs(1 - (today_moon * 2))), 'percent of the moon is visible today.')
"""
Explanation: 3) The first daily forecast is the forecast for today. For the place you decided on up above, how much of the moon is currently visible?
End of explanation
"""
print('The difference between today\'s high and low temperatures is', round(data['daily']['data'][0]['temperatureMax'] - data['daily']['data'][0]['temperatureMin'], 2), 'degrees Fahrenheit.')
"""
Explanation: 4) What's the difference between the high and low temperatures for today?
End of explanation
"""
daily_forecast = data['daily']['data']
print('Starting with today\'s, the forecasts for the next week are for highs of:')
for day in daily_forecast:
if 85 <= day['temperatureMax']:
warmth = 'hot'
elif 70 <= day['temperatureMax'] < 85:
warmth = 'warm'
else:
warmth = 'cold'
print(day['temperatureMax'], 'degrees Fahrenheit, a pretty', warmth, 'day.')
"""
Explanation: 5) Loop through the daily forecast, printing out the next week's worth of predictions. I'd like to know the high temperature for each day, and whether it's hot, warm, or cold, based on what temperatures you think are hot, warm or cold.
End of explanation
"""
fl_url = 'https://api.forecast.io/forecast/' + apikey + '/' + coordinates['Miami']
fl_response = requests.get(fl_url)
fl_data = fl_response.json()
# #temp. Answer: dict
# print(type(fl_data['hourly']))
# #temp. Answer: ['summary', 'data', 'icon']
# print(fl_data['hourly'].keys())
# #temp. Answer: list
# print(type(fl_data['hourly']['data']))
# #temp. Answer: 49
# print(len(fl_data['hourly']['data']))
# #temp. It's a list of dictionaries
# #the top of this hour
# print(fl_data['hourly']['data'][0])
# #the top of next hour
# print(fl_data['hourly']['data'][1])
# #temp. Answer: ['precipType', 'time', 'apparentTemperature', 'windSpeed', 'icon', 'summary', 'precipProbability',
# #'visibility', 'cloudCover', 'pressure', 'windBearing', 'ozone', 'humidity', 'precipIntensity', 'temperature',
# #'dewPoint']
# print(fl_data['hourly']['data'][0].keys())
# # how many hours are left in the day in EDT: (24 - ((time % 86400)/3600 - 4))
# times = [1465423200, 1465426800]
# for time in times:
# print (24 - ((time % 86400)/3600 - 4))
hourly_data = fl_data['hourly']['data']
hours_left = range(int(24 - ((hourly_data[0]['time'] % 86400)/3600 - 4)))
print('Starting with this hour, the hourly forecasts for the rest of the day are for:')
for hour in hours_left:
if hourly_data[hour]['cloudCover'] > .5:
print(hourly_data[hour]['temperature'], 'degrees Fahrenheit and cloudy')
else:
print(hourly_data[hour]['temperature'], 'degrees Fahrenheit')
"""
Explanation: 6) What's the weather looking like for the rest of today in Miami, Florida? I'd like to know the temperature for every hour, and if it's going to have cloud cover of more than 0.5 say "{temperature} and cloudy" instead of just the temperature.
End of explanation
"""
decades = range(3)
for decade in decades:
cp_url = 'https://api.forecast.io/forecast/' + apikey + '/' + coordinates['Central Park'] + ',' + str(10 * decade + 1980) + '-12-25T12:00:00'
cp_response = requests.get(cp_url)
cp_data = cp_response.json()
print('On Christmas Day in', str(1980 + decade * 10) + ', the high in Central Park was', cp_data['daily']['data'][0]['temperatureMax'], 'degrees Fahrenheit.')
"""
Explanation: 7) What was the temperature in Central Park on Christmas Day, 1980? How about 1990? 2000?
Tip: You'll need to use UNIX time, which is the number of seconds since January 1, 1970. Google can help you convert a normal date!
Tip: You'll want to use Forecast.io's "time machine" API at https://developer.forecast.io/docs/v2
End of explanation
"""
|
hcp4715/AnalyzingExpData | HDDM/Within_models_from_tutorial.ipynb | cc0-1.0 | # check which python is in use.
import sys
print('Notebook is running:', sys.executable)
# further check your python version
from platform import python_version
print('The current Python version is', python_version())
# If you are sure that conda is installed, also check the packages that are installed
#!conda list # list the conda
import hddm, IPython
import numpy as np
import pandas as pd
print('The current HDDM version is', hddm.__version__) # 0.8.0
# Warning:`IPython.parallel` package has been deprecated since IPython 4.0.
print('The current IPython version is', IPython.__version__)
print('The current Numpy version is', np.__version__)
print('The current Pandas version is', pd.__version__)
%matplotlib inline
# Preparation
import os, time, csv
from datetime import date
import random
import kabuki, hddm
from patsy import dmatrix
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
"""
Explanation: Testing within-subject models
Issues that still need to be solved:
* Add more than $v$ to the model;
* Stim-coding with HDDMRegression (there was a tutorial on this)
* Flexible design
End of explanation
"""
import sys
# the following will save all the stdout to the txt file, which is not necessary
# sys.stdout = open('ModelRecoveryOutput.txt', 'w')
"""
Explanation: Let's first replicate the tutorial on the HDDM website
http://ski.clps.brown.edu/hddm_docs/tutorial_regression_stimcoding.html
End of explanation
"""
n_subjects = 10
trials_per_level = 150
"""
Explanation: Creating simulated data for the experiment
set number of subjects and number of trials per level for the simulated experiment
End of explanation
"""
level1a = {'v':.3, 'a':2, 't':.3, 'sv':0, 'z':.5, 'sz':0, 'st':0}
level2a = {'v':.4, 'a':2, 't':.3, 'sv':0, 'z':.6, 'sz':0, 'st':0}
level3a = {'v':.5, 'a':2, 't':.3, 'sv':0, 'z':.7, 'sz':0, 'st':0}
"""
Explanation: Next, set up parameters of the DDM for three levels and the first stimulus.
End of explanation
"""
level1b = {'v':.3, 'a':2, 't':.3, 'sv':0, 'z':.5, 'sz':0, 'st':0}
level2b = {'v':.4, 'a':2, 't':.3, 'sv':0, 'z':.4, 'sz':0, 'st':0}
level3b = {'v':.5, 'a':2, 't':.3, 'sv':0, 'z':.3, 'sz':0, 'st':0}
"""
Explanation: Next, set up parameters for the second stimulus, where v is the same as for the first stimulus. This is different for z. In particular: z(stimulus_b) = 1 - z(stimulus_a).
End of explanation
"""
random.seed(123)
np.random.seed(123)
data_a, params_a = hddm.generate.gen_rand_data({'level1': level1a,
'level2': level2a,
'level3': level3a},
size=trials_per_level,
subjs=n_subjects)
data_b, params_b = hddm.generate.gen_rand_data({'level1': level1b,
'level2': level2b,
'level3': level3b},
size=trials_per_level,
subjs=n_subjects)
data_a['stimulus'] = pd.Series(np.ones((len(data_a))), index=data_a.index)
data_b['stimulus'] = pd.Series(np.ones((len(data_b)))*2, index=data_a.index)
mydata = data_a.append(data_b,ignore_index=True)
mydata.head(10)
"""
Explanation: Now, let's generate data for both stimuli
End of explanation
"""
tmpdata = mydata[mydata['subj_idx'] <= 0]
tmpdata
stim = (dmatrix('0 + C(s, [[1], [-1]])',
{'s':tmpdata.stimulus.loc[tmpdata.index]},
return_type="dataframe")
)
#tmp = np.multiply(tmpdata[['stimulus']], stim[:])
tmp = np.multiply(tmpdata.stimulus.loc[tmpdata.index].to_frame(),stim)
#1/(1+np.exp(-((np.asarray(mydata.stimulus.iloc[mydata.index])) * stim)))
type(stim)
#stim.index
#type(mydata.stimulus.iloc[mydata.index])
tmp
tmp2 = tmpdata.stimulus.loc[tmpdata.index]
tmp2.to_frame().shape
# x is a pandas Series, it should be converted to dataframe to do the multiplication.
def z_link_func(x, data=mydata):
stim = (dmatrix('0 + C(s, [[1], [-1]])',
{'s':data.stimulus.loc[x.index]},
return_type="dataframe")
# stim = (np.asarray(dmatrix('0 + C(s, [[1], [-1]])',
#{'s':data.stimulus.ix[x.index]})) # original .ix is deprecated.
)
#print(x.shape)
return 1/(1+np.exp(-np.multiply(x.to_frame(), stim)))
#return 1 /( 1+np.exp(-(x * stim)))
"""
Explanation: Setting up the HDDM regression model
The parameter z is bound between 0 and 1, but a standard linear regression does not generate values between 0 and 1. Therefore, we use a link function, here the inverse logit $1/(1+exp(-x))$, which transforms values between plus and minus infinity into values ranging from (just above) 0 to (nearly) 1. [This link function is related to logistic regression.]
Next, we need to ensure that the bias is $z$ for one stimulus and $1-z$ for the other stimulus. To achieve this, we can simply multiply the regression output for one stimulus by $-1$. This is implemented here by dot-multiplying the regression output "x" (which is an array) with an equally sized array "stim", which is 1 for all stimulus A trials and -1 for all stimulus B trials. We use the patsy command dmatrix to generate such an array from the stimulus column of our simulated data. A small numeric illustration of the link function is added right after this cell.
Note
* "dot-multiplying" is vague here; it should be the Hadamard (element-wise) product, see here.
End of explanation
"""
z_reg = {'model': 'z ~ 1 + C(condition)', 'link_func':z_link_func}
"""
Explanation: Now, we set up the regression models for z and v and also include the link functions. The relevant string used here by patsy is 1 + C(condition). This will generate a design matrix with an intercept (that's what the 1 is for) and two dummy variables for the remaining levels; a quick peek at the resulting design matrix is sketched below. (The column in which the levels are coded has the default name condition):
End of explanation
"""
v_reg = {'model': 'v ~ 1 + C(condition)', 'link_func': lambda x:x}
"""
Explanation: For v the link function is simply $x = x$, because no transformation is needed. (However, you could also analyze this experiment with response-coded data. Then you would not stimulus-code z but v, and you would have to multiply the v for one condition by -1, with a link function like the one for z but without the additional logit transform):
End of explanation
"""
reg_descr = [z_reg, v_reg]
"""
Explanation: Now, we can finally put the regression description for the hddm model together. The general form for this is [{'model': 'outcome_parameter ~ patsy_design_string', 'link_func': your_link_func}, {...}, ...]
End of explanation
"""
m_reg = hddm.HDDMRegressor(mydata, reg_descr, include='z')
"""
Explanation: The last step before running the model is to construct the complete hddm regression model by adding data etc.
End of explanation
"""
m_reg.sample(5000, burn=200, dbname='within_effect.db', db='pickle')
m_reg.save('within_effect')
"""
Explanation: Now, we start the sampling, which takes a long time
End of explanation
"""
m_reg.print_stats()
"""
Explanation: Comparing generative and recovered model parameters
First, let's print the model stats
End of explanation
"""
# load data
df1a = hddm.load_csv('df1a.v.hddm_stim.csv')
df = df1a
# randomly chose 10 participants from the dataset
df_subj = df['subj_idx'].unique()
random.seed(10)
df_test_list = []
for i in range(10):
pos = random.randint(0, (len(df_subj)-1))
df_test_list.append(df_subj[pos])
df_test = df[df['subj_idx'].isin(df_test_list)]
def z_link_func(x, data=mydata):
stim = (dmatrix('0 + C(s, [[1], [-1]])',
{'s':data.stimulus.loc[x.index]},
return_type="dataframe")
)
return 1/(1+np.exp(-np.multiply(x.to_frame(), stim)))
z_reg = {'model': 'z ~ 1 + C(condition)', 'link_func': z_link_func}
v_reg = {'model': 'v ~ 1 + C(condition)', 'link_func': lambda x:x}
a_reg = {'model': 'a ~ 1 + C(condition)', 'link_func': lambda x:x}
reg_descr = [z_reg, v_reg]
m_reg = hddm.HDDMRegressor(mydata, reg_descr, include='z')
m_reg.sample(5000, burn=200, dbname='within_effect.db', db='pickle')
m_within_subj = hddm.HDDMRegressor(df1a, "v ~ C(match, val, Treatment('Mismatch.Neutral'))")
"""
Explanation: Working with real data
Now let's move to the real data
End of explanation
"""
|
jcharit1/Identifying-Ad-Images | code/model_training.ipynb | mit | import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import json
from IPython.display import Image
from IPython.core.display import HTML
"""
Explanation: Model Training
Code for finding the best predictive model
Author: Jimmy Charité
Email: [email protected]
Date: January 8, 2017
Directory & Initial Packages
End of explanation
"""
retval=os.chdir("..")
clean_data=pd.read_csv("./clean_data/modeling_data.csv")
clean_data.head()
"""
Explanation: The default directory is the code subdirectory. Changing to the main repo directory above.
Upload Data
End of explanation
"""
my_rand_state=0
"""
Explanation: Random States
End of explanation
"""
from sklearn.model_selection import train_test_split
X = (clean_data.iloc[:,:-1]).as_matrix()
y = (clean_data.iloc[:,-1]).tolist()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
random_state=my_rand_state)
"""
Explanation: Training and Testing Split
End of explanation
"""
from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.under_sampling import RandomUnderSampler, TomekLinks
ros = RandomOverSampler(random_state=my_rand_state)
smote = SMOTE(random_state=my_rand_state)
rus = RandomUnderSampler(random_state=my_rand_state)
tl = TomekLinks(random_state=my_rand_state)
"""
Explanation: Class Imbalance Corrections
End of explanation
"""
from sklearn.feature_selection import VarianceThreshold
vt = VarianceThreshold()
threshold=[p*(1-p) for p in [0, 0.05, 0.1, 0.15]]
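# Quick check (added for clarity): these thresholds are Bernoulli variances p*(1-p),
# which peak at 0.25 for p = 0.5 -- see the note on the variance formula below.
for p in [0, 0.05, 0.1, 0.15, 0.5]:
    print(p, p * (1 - p))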
"""
Explanation: Feature Selection
End of explanation
"""
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
"""
Explanation: Note, since the formula for the variance of binary variables is p*(1-p), where p is the proportion of times that the binary variable is 1, I use the proportion to define the variance thresholds. The max variance is 0.25 at p=0.5.
Classification Models
End of explanation
"""
nb_clf=GaussianNB()
priors=[None]
qda_clf=QuadraticDiscriminantAnalysis()
reg_param=[0.0, 0.25, 0.5, 0.75]
log_clf=LogisticRegression()
C=[0.001 , 0.01, 10, 100,1000]
knn_clf=KNeighborsClassifier(n_jobs=4)
n_neighbors=list(range(1,17,2))
weights=['uniform','distance']
rf_clf=RandomForestClassifier()
n_estimators=[100]
max_features=[.1,.3,.5]
class_weight=['balanced']
class_weight.extend([{1: w} for w in [1, 2, 10]])
"""
Explanation: Although tuning is not necessary for Naive Bayes, I pass the default parameters of those models to GridSearchCV anyway so that I can do a direct pair-wise comparison with the other models across the different steps of cross-validation.
In the interest of time, I didn't use the SVM classifier.
I used a training set to tune the model's hyperparameters and a test set to evaluate them. With more time and data, I would use repeated nested cross-validation to create a more robust model tuning, selection, and performance assessment workflow.
End of explanation
"""
from imblearn import pipeline #needed if mixing imblearn with sklearn classes
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
n_jobs=4
n_folds=10
skfold = StratifiedKFold(n_splits=n_folds,random_state=my_rand_state, shuffle=False)
"""
Explanation: Creating Pipelines
End of explanation
"""
nb_clf_b = pipeline.Pipeline(steps=[('vt',vt),('clf',nb_clf)])
nb_clf_est_b = GridSearchCV(estimator=nb_clf_b,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,clf__priors=priors))
nb_clf_ros = pipeline.Pipeline(steps=[('ros',ros),('vt',vt),
('clf',nb_clf)])
nb_clf_est_ros = GridSearchCV(estimator=nb_clf_ros,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__priors=priors))
nb_clf_smote = pipeline.Pipeline(steps=[('smote',smote),('vt',vt),
('clf',nb_clf)])
nb_clf_est_smote = GridSearchCV(estimator=nb_clf_smote,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__priors=priors))
nb_clf_rus = pipeline.Pipeline(steps=[('rus',rus),('vt',vt),
('clf',nb_clf)])
nb_clf_est_rus = GridSearchCV(estimator=nb_clf_rus,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__priors=priors))
nb_clf_tl = pipeline.Pipeline(steps=[('tl',tl),('vt',vt),
('clf',nb_clf)])
nb_clf_est_tl = GridSearchCV(estimator=nb_clf_tl,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__priors=priors))
"""
Explanation: Naive Bayes Estimators
End of explanation
"""
qda_clf_b = pipeline.Pipeline(steps=[('vt',vt),('clf',qda_clf)])
qda_clf_est_b = GridSearchCV(estimator=qda_clf_b,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,clf__reg_param=reg_param))
qda_clf_ros = pipeline.Pipeline(steps=[('ros',ros),('vt',vt),
('clf',qda_clf)])
qda_clf_est_ros = GridSearchCV(estimator=qda_clf_ros,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__reg_param=reg_param))
qda_clf_smote = pipeline.Pipeline(steps=[('smote',smote),('vt',vt),
('clf',qda_clf)])
qda_clf_est_smote = GridSearchCV(estimator=qda_clf_smote,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__reg_param=reg_param))
qda_clf_rus = pipeline.Pipeline(steps=[('rus',rus),('vt',vt),
('clf',qda_clf)])
qda_clf_est_rus = GridSearchCV(estimator=qda_clf_rus,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__reg_param=reg_param))
qda_clf_tl = pipeline.Pipeline(steps=[('tl',tl),('vt',vt),
('clf',qda_clf)])
qda_clf_est_tl = GridSearchCV(estimator=qda_clf_tl,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__reg_param=reg_param))
"""
Explanation: QDA Estimators
End of explanation
"""
log_clf_b = pipeline.Pipeline(steps=[('vt',vt),('clf',log_clf)])
log_clf_est_b = GridSearchCV(estimator=log_clf_b,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,clf__C=C,
clf__class_weight=class_weight))
log_clf_ros = pipeline.Pipeline(steps=[('ros',ros),('vt',vt),
('clf',log_clf)])
log_clf_est_ros = GridSearchCV(estimator=log_clf_ros,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,clf__C=C,
clf__class_weight=class_weight))
log_clf_smote = pipeline.Pipeline(steps=[('smote',smote),('vt',vt),
('clf',log_clf)])
log_clf_est_smote = GridSearchCV(estimator=log_clf_smote,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,clf__C=C,
clf__class_weight=class_weight))
log_clf_rus = pipeline.Pipeline(steps=[('rus',rus),('vt',vt),
('clf',log_clf)])
log_clf_est_rus = GridSearchCV(estimator=log_clf_rus,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,clf__C=C,
clf__class_weight=class_weight))
log_clf_tl = pipeline.Pipeline(steps=[('tl',tl),('vt',vt),
('clf',log_clf)])
log_clf_est_tl = GridSearchCV(estimator=log_clf_tl,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,clf__C=C,
clf__class_weight=class_weight))
"""
Explanation: Logistic Estimators
End of explanation
"""
knn_clf_b = pipeline.Pipeline(steps=[('vt',vt),('clf',knn_clf)])
knn_clf_est_b = GridSearchCV(estimator=knn_clf_b,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__n_neighbors=n_neighbors,
clf__weights=weights))
knn_clf_ros = pipeline.Pipeline(steps=[('ros',ros),('vt',vt),
('clf',knn_clf)])
knn_clf_est_ros = GridSearchCV(estimator=knn_clf_ros,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__n_neighbors=n_neighbors,
clf__weights=weights))
knn_clf_smote = pipeline.Pipeline(steps=[('smote',smote),('vt',vt),
('clf',knn_clf)])
knn_clf_est_smote = GridSearchCV(estimator=knn_clf_smote,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__n_neighbors=n_neighbors,
clf__weights=weights))
knn_clf_rus = pipeline.Pipeline(steps=[('rus',rus),('vt',vt),
('clf',knn_clf)])
knn_clf_est_rus = GridSearchCV(estimator=knn_clf_rus,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__n_neighbors=n_neighbors,
clf__weights=weights))
knn_clf_tl = pipeline.Pipeline(steps=[('tl',tl),('vt',vt),
('clf',knn_clf)])
knn_clf_est_tl = GridSearchCV(estimator=knn_clf_tl,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__n_neighbors=n_neighbors,
clf__weights=weights))
"""
Explanation: KNN Estimators
End of explanation
"""
rf_clf_b = pipeline.Pipeline(steps=[('vt',vt),('clf',rf_clf)])
rf_clf_est_b = GridSearchCV(estimator=rf_clf_b,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__n_estimators=n_estimators,
clf__max_features=max_features,
clf__class_weight=class_weight))
rf_clf_ros = pipeline.Pipeline(steps=[('ros',ros),('vt',vt),
('clf',rf_clf)])
rf_clf_est_ros = GridSearchCV(estimator=rf_clf_ros,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__n_estimators=n_estimators,
clf__max_features=max_features,
clf__class_weight=class_weight))
rf_clf_smote = pipeline.Pipeline(steps=[('smote',smote),('vt',vt),
('clf',rf_clf)])
rf_clf_est_smote = GridSearchCV(estimator=rf_clf_smote,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__n_estimators=n_estimators,
clf__max_features=max_features,
clf__class_weight=class_weight))
rf_clf_rus = pipeline.Pipeline(steps=[('rus',rus),('vt',vt),
('clf',rf_clf)])
rf_clf_est_rus = GridSearchCV(estimator=rf_clf_rus,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__n_estimators=n_estimators,
clf__max_features=max_features,
clf__class_weight=class_weight))
rf_clf_tl = pipeline.Pipeline(steps=[('tl',tl),('vt',vt),
('clf',rf_clf)])
rf_clf_est_tl = GridSearchCV(estimator=rf_clf_tl,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(vt__threshold=threshold,
clf__n_estimators=n_estimators,
clf__max_features=max_features,
clf__class_weight=class_weight))
"""
Explanation: Random Forest Estimators
End of explanation
"""
from sklearn.externals import joblib
"""
Explanation: Fitting Estimators
End of explanation
"""
nb_clf_est_b.fit(X_train,y_train)
joblib.dump(nb_clf_est_b, './other_output/nb_clf_est_b.pkl')
nb_clf_est_ros.fit(X_train,y_train)
joblib.dump(nb_clf_est_ros, './other_output/nb_clf_est_ros.pkl')
nb_clf_est_smote.fit(X_train,y_train)
joblib.dump(nb_clf_est_smote, './other_output/nb_clf_est_smote.pkl')
nb_clf_est_rus.fit(X_train,y_train)
joblib.dump(nb_clf_est_rus, './other_output/nb_clf_est_rus.pkl')
nb_clf_est_tl.fit(X_train,y_train)
joblib.dump(nb_clf_est_tl, './other_output/nb_clf_est_tl.pkl')
"""
Explanation: Basic Estimators
Naive Bayes Estimators
End of explanation
"""
qda_clf_est_b.fit(X_train,y_train)
joblib.dump(qda_clf_est_b, './other_output/qda_clf_est_b.pkl')
qda_clf_est_ros.fit(X_train,y_train)
joblib.dump(qda_clf_est_ros, './other_output/qda_clf_est_ros.pkl')
qda_clf_est_smote.fit(X_train,y_train)
joblib.dump(qda_clf_est_smote, './other_output/qda_clf_est_smote.pkl')
qda_clf_est_rus.fit(X_train,y_train)
joblib.dump(qda_clf_est_rus, './other_output/qda_clf_est_rus.pkl')
qda_clf_est_tl.fit(X_train,y_train)
joblib.dump(qda_clf_est_tl, './other_output/qda_clf_est_tl.pkl')
"""
Explanation: QDA Estimators
End of explanation
"""
log_clf_est_b.fit(X_train,y_train)
joblib.dump(log_clf_est_b, './other_output/log_clf_est_b.pkl')
log_clf_est_ros.fit(X_train,y_train)
joblib.dump(log_clf_est_ros, './other_output/log_clf_est_ros.pkl')
log_clf_est_smote.fit(X_train,y_train)
joblib.dump(log_clf_est_smote, './other_output/log_clf_est_smote.pkl')
log_clf_est_rus.fit(X_train,y_train)
joblib.dump(log_clf_est_rus, './other_output/log_clf_est_rus.pkl')
log_clf_est_tl.fit(X_train,y_train)
joblib.dump(log_clf_est_tl, './other_output/log_clf_est_tl.pkl')
"""
Explanation: Logistic Estimators
End of explanation
"""
knn_clf_est_b.fit(X_train,y_train)
joblib.dump(knn_clf_est_b, './other_output/knn_clf_est_b.pkl')
knn_clf_est_ros.fit(X_train,y_train)
joblib.dump(knn_clf_est_ros, './other_output/knn_clf_est_ros.pkl')
knn_clf_est_smote.fit(X_train,y_train)
joblib.dump(knn_clf_est_smote, './other_output/knn_clf_est_smote.pkl')
knn_clf_est_rus.fit(X_train,y_train)
joblib.dump(knn_clf_est_rus, './other_output/knn_clf_est_rus.pkl')
knn_clf_est_tl.fit(X_train,y_train)
joblib.dump(knn_clf_est_tl, './other_output/knn_clf_est_tl.pkl')
"""
Explanation: KNN Estimators
End of explanation
"""
rf_clf_est_b.fit(X_train,y_train)
joblib.dump(rf_clf_est_b, './other_output/rf_clf_est_b.pkl')
rf_clf_est_ros.fit(X_train,y_train)
joblib.dump(rf_clf_est_ros, './other_output/rf_clf_est_ros.pkl')
rf_clf_est_smote.fit(X_train,y_train)
joblib.dump(rf_clf_est_smote, './other_output/rf_clf_est_smote.pkl')
rf_clf_est_rus.fit(X_train,y_train)
joblib.dump(rf_clf_est_rus, './other_output/rf_clf_est_rus.pkl')
rf_clf_est_tl.fit(X_train,y_train)
joblib.dump(rf_clf_est_tl, './other_output/rf_clf_est_tl.pkl')
"""
Explanation: Random Forest Estimators
End of explanation
"""
from sklearn.metrics import roc_curve, auc
"""
Explanation: Testing Estimators
Below I show the ROC curves for the models over the test data.
End of explanation
"""
nb_fpr, nb_tpr, _ = roc_curve(y_test,
nb_clf_est_b.predict_proba(X_test)[:,1])
nb_roc_auc = auc(nb_fpr, nb_tpr)
qda_fpr, qda_tpr, _ = roc_curve(y_test,
qda_clf_est_b.predict_proba(X_test)[:,1])
qda_roc_auc = auc(qda_fpr, qda_tpr)
log_fpr, log_tpr, _ = roc_curve(y_test,
log_clf_est_b.predict_proba(X_test)[:,1])
log_roc_auc = auc(log_fpr, log_tpr)
knn_fpr, knn_tpr, _ = roc_curve(y_test,
knn_clf_est_b.predict_proba(X_test)[:,1])
knn_roc_auc = auc(knn_fpr, knn_tpr)
rf_fpr, rf_tpr, _ = roc_curve(y_test,
rf_clf_est_b.predict_proba(X_test)[:,1])
rf_roc_auc = auc(rf_fpr, rf_tpr)
plt.plot(nb_fpr, nb_tpr, color='cyan', linestyle='--',
label='NB (area = %0.2f)' % nb_roc_auc, lw=2)
plt.plot(qda_fpr, qda_tpr, color='indigo', linestyle='--',
label='QDA (area = %0.2f)' % qda_roc_auc, lw=2)
plt.plot(log_fpr, log_tpr, color='seagreen', linestyle='--',
label='LOG (area = %0.2f)' % log_roc_auc, lw=2)
plt.plot(knn_fpr, knn_tpr, color='yellow', linestyle='--',
label='KNN (area = %0.2f)' % knn_roc_auc, lw=2)
plt.plot(rf_fpr, rf_tpr, color='blue', linestyle='--',
label='RF (area = %0.2f)' % rf_roc_auc, lw=2)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='k',
label='Luck')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curves of Basic Models')
plt.legend(loc="lower right")
plt.savefig('./plots/ROC_Basic.png', bbox_inches='tight')
plt.show()
"""
Explanation: Basic Estimators
End of explanation
"""
nb_fpr, nb_tpr, _ = roc_curve(y_test,
nb_clf_est_ros.predict_proba(X_test)[:,1])
nb_roc_auc = auc(nb_fpr, nb_tpr)
qda_fpr, qda_tpr, _ = roc_curve(y_test,
qda_clf_est_ros.predict_proba(X_test)[:,1])
qda_roc_auc = auc(qda_fpr, qda_tpr)
log_fpr, log_tpr, _ = roc_curve(y_test,
log_clf_est_ros.predict_proba(X_test)[:,1])
log_roc_auc = auc(log_fpr, log_tpr)
knn_fpr, knn_tpr, _ = roc_curve(y_test,
knn_clf_est_ros.predict_proba(X_test)[:,1])
knn_roc_auc = auc(knn_fpr, knn_tpr)
rf_fpr, rf_tpr, _ = roc_curve(y_test,
rf_clf_est_ros.predict_proba(X_test)[:,1])
rf_roc_auc = auc(rf_fpr, rf_tpr)
plt.plot(nb_fpr, nb_tpr, color='cyan', linestyle='--',
label='NB (area = %0.2f)' % nb_roc_auc, lw=2)
plt.plot(qda_fpr, qda_tpr, color='indigo', linestyle='--',
label='QDA (area = %0.2f)' % qda_roc_auc, lw=2)
plt.plot(log_fpr, log_tpr, color='seagreen', linestyle='--',
label='LOG (area = %0.2f)' % log_roc_auc, lw=2)
plt.plot(knn_fpr, knn_tpr, color='yellow', linestyle='--',
label='KNN (area = %0.2f)' % knn_roc_auc, lw=2)
plt.plot(rf_fpr, rf_tpr, color='blue', linestyle='--',
label='RF (area = %0.2f)' % rf_roc_auc, lw=2)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='k',
label='Luck')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curves of Models with Oversampling')
plt.legend(loc="lower right")
plt.savefig('./plots/ROC_ROS.png', bbox_inches='tight')
plt.show()
"""
Explanation: Estimators with Random Oversampling of Minority Class
End of explanation
"""
nb_fpr, nb_tpr, _ = roc_curve(y_test,
nb_clf_est_smote.predict_proba(X_test)[:,1])
nb_roc_auc = auc(nb_fpr, nb_tpr)
qda_fpr, qda_tpr, _ = roc_curve(y_test,
qda_clf_est_smote.predict_proba(X_test)[:,1])
qda_roc_auc = auc(qda_fpr, qda_tpr)
log_fpr, log_tpr, _ = roc_curve(y_test,
log_clf_est_smote.predict_proba(X_test)[:,1])
log_roc_auc = auc(log_fpr, log_tpr)
knn_fpr, knn_tpr, _ = roc_curve(y_test,
knn_clf_est_smote.predict_proba(X_test)[:,1])
knn_roc_auc = auc(knn_fpr, knn_tpr)
rf_fpr, rf_tpr, _ = roc_curve(y_test,
rf_clf_est_smote.predict_proba(X_test)[:,1])
rf_roc_auc = auc(rf_fpr, rf_tpr)
plt.plot(nb_fpr, nb_tpr, color='cyan', linestyle='--',
label='NB (area = %0.2f)' % nb_roc_auc, lw=2)
plt.plot(qda_fpr, qda_tpr, color='indigo', linestyle='--',
label='QDA (area = %0.2f)' % qda_roc_auc, lw=2)
plt.plot(log_fpr, log_tpr, color='seagreen', linestyle='--',
label='LOG (area = %0.2f)' % log_roc_auc, lw=2)
plt.plot(knn_fpr, knn_tpr, color='yellow', linestyle='--',
label='KNN (area = %0.2f)' % knn_roc_auc, lw=2)
plt.plot(rf_fpr, rf_tpr, color='blue', linestyle='--',
label='RF (area = %0.2f)' % rf_roc_auc, lw=2)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='k',
label='Luck')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curves of Models with SMOTE')
plt.legend(loc="lower right")
plt.savefig('./plots/ROC_SMOTE.png', bbox_inches='tight')
plt.show()
"""
Explanation: Interestingly, only the basic classifiers improve in predictive performance.
Estimators with SMOTE
End of explanation
"""
nb_fpr, nb_tpr, _ = roc_curve(y_test,
nb_clf_est_rus.predict_proba(X_test)[:,1])
nb_roc_auc = auc(nb_fpr, nb_tpr)
log_fpr, log_tpr, _ = roc_curve(y_test,
log_clf_est_rus.predict_proba(X_test)[:,1])
log_roc_auc = auc(log_fpr, log_tpr)
knn_fpr, knn_tpr, _ = roc_curve(y_test,
knn_clf_est_rus.predict_proba(X_test)[:,1])
knn_roc_auc = auc(knn_fpr, knn_tpr)
rf_fpr, rf_tpr, _ = roc_curve(y_test,
rf_clf_est_rus.predict_proba(X_test)[:,1])
rf_roc_auc = auc(rf_fpr, rf_tpr)
plt.plot(nb_fpr, nb_tpr, color='cyan', linestyle='--',
label='NB (area = %0.2f)' % nb_roc_auc, lw=2)
plt.plot(log_fpr, log_tpr, color='seagreen', linestyle='--',
label='LOG (area = %0.2f)' % log_roc_auc, lw=2)
plt.plot(knn_fpr, knn_tpr, color='yellow', linestyle='--',
label='KNN (area = %0.2f)' % knn_roc_auc, lw=2)
plt.plot(rf_fpr, rf_tpr, color='blue', linestyle='--',
label='RF (area = %0.2f)' % rf_roc_auc, lw=2)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='k',
label='Luck')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curves of Models with Undersampling')
plt.legend(loc="lower right")
plt.savefig('./plots/ROC_RUS.png', bbox_inches='tight')
plt.show()
"""
Explanation: Again, class imbalance corrections only benefit Naive Bayes and QDA.
Estimators with Random Undersampling of Majority Class
End of explanation
"""
nb_fpr, nb_tpr, _ = roc_curve(y_test,
nb_clf_est_tl.predict_proba(X_test)[:,1])
nb_roc_auc = auc(nb_fpr, nb_tpr)
qda_fpr, qda_tpr, _ = roc_curve(y_test,
qda_clf_est_tl.predict_proba(X_test)[:,1])
qda_roc_auc = auc(qda_fpr, qda_tpr)
log_fpr, log_tpr, _ = roc_curve(y_test,
log_clf_est_tl.predict_proba(X_test)[:,1])
log_roc_auc = auc(log_fpr, log_tpr)
knn_fpr, knn_tpr, _ = roc_curve(y_test,
knn_clf_est_tl.predict_proba(X_test)[:,1])
knn_roc_auc = auc(knn_fpr, knn_tpr)
rf_fpr, rf_tpr, _ = roc_curve(y_test,
rf_clf_est_tl.predict_proba(X_test)[:,1])
rf_roc_auc = auc(rf_fpr, rf_tpr)
plt.plot(nb_fpr, nb_tpr, color='cyan', linestyle='--',
label='NB (area = %0.2f)' % nb_roc_auc, lw=2)
plt.plot(qda_fpr, qda_tpr, color='indigo', linestyle='--',
label='QDA (area = %0.2f)' % qda_roc_auc, lw=2)
plt.plot(log_fpr, log_tpr, color='seagreen', linestyle='--',
label='LOG (area = %0.2f)' % log_roc_auc, lw=2)
plt.plot(knn_fpr, knn_tpr, color='yellow', linestyle='--',
label='KNN (area = %0.2f)' % knn_roc_auc, lw=2)
plt.plot(rf_fpr, rf_tpr, color='blue', linestyle='--',
label='RF (area = %0.2f)' % rf_roc_auc, lw=2)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='k',
label='Luck')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curves of Models with Tomek Link Removal')
plt.legend(loc="lower right")
plt.savefig('./plots/ROC_tl.png', bbox_inches='tight')
plt.show()
"""
Explanation: Training QDA with undersampling resulted in errors.
Estimators with Tomek Link Removal
End of explanation
"""
nb_fpr, nb_tpr, _ = roc_curve(y_test,
nb_clf_est_ros.predict_proba(X_test)[:,1])
nb_roc_auc = auc(nb_fpr, nb_tpr)
qda_fpr, qda_tpr, _ = roc_curve(y_test,
qda_clf_est_ros.predict_proba(X_test)[:,1])
qda_roc_auc = auc(qda_fpr, qda_tpr)
log_fpr, log_tpr, _ = roc_curve(y_test,
log_clf_est_b.predict_proba(X_test)[:,1])
log_roc_auc = auc(log_fpr, log_tpr)
knn_fpr, knn_tpr, _ = roc_curve(y_test,
knn_clf_est_rus.predict_proba(X_test)[:,1])
knn_roc_auc = auc(knn_fpr, knn_tpr)
rf_fpr, rf_tpr, _ = roc_curve(y_test,
rf_clf_est_b.predict_proba(X_test)[:,1])
rf_roc_auc = auc(rf_fpr, rf_tpr)
plt.plot(nb_fpr, nb_tpr, color='cyan', linestyle='--',
label='NB (area = %0.2f)' % nb_roc_auc, lw=2)
plt.plot(qda_fpr, qda_tpr, color='indigo', linestyle='--',
label='QDA (area = %0.2f)' % qda_roc_auc, lw=2)
plt.plot(log_fpr, log_tpr, color='seagreen', linestyle='--',
label='LOG (area = %0.2f)' % log_roc_auc, lw=2)
plt.plot(knn_fpr, knn_tpr, color='yellow', linestyle='--',
label='KNN (area = %0.2f)' % knn_roc_auc, lw=2)
plt.plot(rf_fpr, rf_tpr, color='blue', linestyle='--',
label='RF (area = %0.2f)' % rf_roc_auc, lw=2)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='k',
label='Luck')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curves of Best Iteration of Each Model')
plt.legend(loc="lower right")
plt.savefig('./plots/ROC_Best.png', bbox_inches='tight')
plt.show()
"""
Explanation: Plotting the Best of Each Classifier
End of explanation
"""
log_cv_results=pd.DataFrame(log_clf_est_b.cv_results_)
log_cv_results.head()
"""
Explanation: Inspecting the Best Classifier
While the best model performs extremely well, it is important to be aware of model characteristics like the variability of prediction quality, a key model reliability metric, and the important features, which should inform data maintenance and engineering practices as well as model interpretation.
Variability of Prediction Quality
Normally I would use bootstrapped samples of the test data, with the model fitted on the whole training data, to obtain an empirical distribution of the model's performance (AUC ROC in this case); a sketch of that bootstrap is included after this cell for reference. However, with limited data and time, I will use the AUC ROC on the validation folds of the CV grid search to get a sense of the variability. Normally the validation-set AUC ROC values will be biased towards optimism compared to the true out-of-sample performance (on the test set); however, this isn't the case in the example below.
End of explanation
"""
log_cv_results=log_cv_results[(log_cv_results.rank_test_score==1)]
log_cv_results.head()
keep_cols=["split"+str(i)+"_test_score" for i in range(0,10)]
log_cv_results=log_cv_results[keep_cols]
log_cv_results.head()
temp=log_cv_results.T.reset_index()
temp.columns=['Fold','Validation AUC ROC']
temp['Fold']=range(1,11)
temp
temp['Validation AUC ROC'].describe()
"""
Explanation: Isolate the best parameters
End of explanation
"""
rf_clf_b.set_params(**rf_clf_est_b.best_params_).fit(X_train,y_train)
importance=rf_clf_b.named_steps['clf'].feature_importances_
indices = np.argsort(importance)[::-1]
feature_importance=pd.DataFrame({'feature':clean_data.columns[:-1][indices],
                                 'importance':importance[indices]})
feature_importance.sort_values(by='importance',inplace=True,ascending=False)
feature_importance[:10]
"""
Explanation: Fortunately, the performance is stable. With more data and time, I would do repeated cross-validation or repeated nested cross-validation to get more robust estimates of the out-of-sample error and its variability.
Important features
Unfortunately, even on a standardized scale, coefficient magnitude is not necessarily the right way to determine variable importance in a logistic regression. Fortunately, the random forest classifier has similar performance to the logistic regression, so I will use it to identify important features.
End of explanation
"""
best_paras = log_clf_est_b.best_params_
best_paras
with open('./model_para/logistic_best_paras.json', 'w') as outfile:
json.dump(best_paras, outfile)
"""
Explanation: It is not surprising that the two most important features are ad attributes. This also adds confidence in the model by showing that the most important features make intuitive sense, although robust models can have seemingly non-intuitive important features.
Many of the other top-ten features, like urlstatic.wired.com and ancurlwww.amazon.com, also make sense because they are likely links to the URLs of the company that owns the ad.
I would not be surprised if adding random forest as a feature-selection step in the pipeline brought the AUC ROC of nearly all of the classifiers to 0.99 (a sketch of such a pipeline is appended at the end of this notebook).
Final Thoughts
The best model is clearly the logistic classifier without the sampling-based class imbalance corrections. While the random forest mirrors its performance, the random forest is a much more complex and computationally expensive model. Therefore, in practice, the logistic classifier would be best.
It is surprising that the class imbalance corrections had a limited impact on the more complex classifiers. In other classification tasks, the class imbalance corrections, especially the Tomek Link removals, significantly improved the AUC ROC of the more complex classifiers. However, every classification task is different.
If the AUC ROC were not already 0.99, I would try feature selection via random forest, ensemble methods other than random forest (like bagging classifiers, adaptive boosting, or even extreme gradient boosting), or more aggressive feature engineering and hyperparameter tuning. However, at an AUC ROC of 0.99, this is definitely not a good investment of time.
Saving parameters of final model
End of explanation
"""
|
tsarouch/data_science_references_python | pyspark/pyspark_read_csv_template.ipynb | gpl-2.0 | from pyspark.sql import SQLContext, Row
sqlContext = SQLContext(sc)
"""
Explanation: """
This was done at a time when Spark did not support CSV parsing in a single line.
I'm sure it will come soon... (a one-line equivalent for newer Spark versions is sketched at the end of this notebook)
"""
""" === e.g. the csv file looks like this:===
field1, field2, time
5768, 49.4,'2014-12-19 04:15:00+01',
1039, 26.1, 2014-12-18 14:45:00+01'
...
"""
Start Notebook with pyspark
IPYTHON_OPTS="notebook --profile=pysparknb" /Users/charil/.../bin/pyspark --jars /Users/chari.../spark-examples-1.3.1-hadoop2.4.0.jar
Necessary imports
End of explanation
"""
# read the csv data
data = sc.textFile('tableau_grid_tile_refactored.csv')
# remove the header
header = data.take(1)[0]
rows = data.filter(lambda line: line != header)
# parse the lines, split with delimiter and provide Row names (and types if you wish, if not inferred)
row_parts = rows.map(lambda l: l.split("|"))
data_rdd = row_parts.map(lambda p: Row(area_id=p[0], cnt=p[1], start_time=str(p[2])))
# convert to dataframe
df = sqlContext.createDataFrame(data_rdd)
# lets see now 2 rows of it
df.show(2)
"""
Explanation: csv to DataFrame
End of explanation
"""
|
piskvorky/gensim | docs/src/auto_examples/howtos/run_downloader_api.ipynb | lgpl-2.1 | import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
"""
Explanation: How to download pre-trained models and corpora
Demonstrates simple and quick access to common corpora and pretrained models.
End of explanation
"""
import gensim.downloader as api
"""
Explanation: One of Gensim's features is simple and easy access to common data.
The gensim-data <https://github.com/RaRe-Technologies/gensim-data>_ project stores a
variety of corpora and pretrained models.
Gensim has a :py:mod:gensim.downloader module for programmatically accessing this data.
This module leverages a local cache (in user's home folder, by default) that
ensures data is downloaded at most once.
This tutorial:
Downloads the text8 corpus, unless it is already on your local machine
Trains a Word2Vec model from the corpus (see sphx_glr_auto_examples_tutorials_run_doc2vec_lee.py for a detailed tutorial)
Leverages the model to calculate word similarity
Demonstrates using the API to load other models and corpora
Let's start by importing the api module.
End of explanation
"""
corpus = api.load('text8')
"""
Explanation: Now, let's download the text8 corpus and load it as a Python object
that supports streamed access.
End of explanation
"""
import inspect
print(inspect.getsource(corpus.__class__))
"""
Explanation: In this case, our corpus is an iterable.
If you look under the covers, it has the following definition:
End of explanation
"""
print(inspect.getfile(corpus.__class__))
"""
Explanation: For more details, look inside the file that defines the Dataset class for your particular resource.
End of explanation
"""
from gensim.models.word2vec import Word2Vec
model = Word2Vec(corpus)
"""
Explanation: Now that the corpus has been downloaded and loaded, let's use it to train a word2vec model.
End of explanation
"""
print(model.wv.most_similar('tree'))
"""
Explanation: Now that we have our word2vec model, let's find words that are similar to 'tree'.
End of explanation
"""
import json
info = api.info()
print(json.dumps(info, indent=4))
"""
Explanation: You can use the API to download several different corpora and pretrained models.
Here's how to list all resources available in gensim-data:
End of explanation
"""
print(info.keys())
"""
Explanation: There are two types of data resources: corpora and models.
End of explanation
"""
for corpus_name, corpus_data in sorted(info['corpora'].items()):
print(
'%s (%d records): %s' % (
corpus_name,
corpus_data.get('num_records', -1),
corpus_data['description'][:40] + '...',
)
)
"""
Explanation: Let's have a look at the available corpora:
End of explanation
"""
for model_name, model_data in sorted(info['models'].items()):
print(
'%s (%d records): %s' % (
model_name,
model_data.get('num_records', -1),
model_data['description'][:40] + '...',
)
)
"""
Explanation: ... and the same for models:
End of explanation
"""
fake_news_info = api.info('fake-news')
print(json.dumps(fake_news_info, indent=4))
"""
Explanation: If you want to get detailed information about a model/corpus, use:
End of explanation
"""
print(api.load('glove-wiki-gigaword-50', return_path=True))
"""
Explanation: Sometimes, you do not want to load a model into memory. Instead, you can request
just the filesystem path to the model. For that, use:
End of explanation
"""
model = api.load("glove-wiki-gigaword-50")
model.most_similar("glass")
"""
Explanation: If you want to load the model to memory, then:
End of explanation
"""
|
yandexdataschool/LHCb-topo-trigger | HLT2-TreesPruning.ipynb | apache-2.0 | sig_train_modes_names = [11114001, 11296013, 11874042, 12103035, 13246001, 13264021]
bck_train_mode_name = 30000000
sig_train_files = ['mod_{}.csv'.format(name) for name in sig_train_modes_names]
bck_train_files = 'mod_30000000.csv'
folder = "datasets/prepared_hlt_body/"
# concat all signal data
if not os.path.exists(folder + 'signal_hlt2.csv'):
concat_files(folder, sig_train_files, os.path.join(folder , 'signal_hlt2.csv'))
signal_data = pandas.read_csv(os.path.join(folder , 'signal_hlt2.csv'), sep='\t')
bck_data = pandas.read_csv(os.path.join(folder , bck_train_files), sep='\t')
signal_data.columns
"""
Explanation: HLT2 nbody classification
Applied preselections:
any sv.n,
any sv.minpt
sv.nlt16 < 2
Training channels (read data)
We will use just 11114001, 11296013, 11874042, 12103035, 13246001, 13264021
End of explanation
"""
print 'Signal', statistic_length(signal_data)
print 'Bck', statistic_length(bck_data)
total_bck_events = statistic_length(bck_data)['Events'] + empty_events[bck_train_mode_name]
total_signal_events_by_mode = dict()
for mode in sig_train_modes_names:
total_signal_events_by_mode[mode] = statistic_length(signal_data[signal_data['mode'] == mode])['Events'] + empty_events[mode]
"""
Explanation: Counting events and svrs
that passed the L0 and GoodGenB preselection (this data was generated by the skim)
End of explanation
"""
print 'Bck:', total_bck_events
'Signal:', total_signal_events_by_mode
"""
Explanation: Event distribution by mode
End of explanation
"""
variables = ["n", "mcor", "chi2", "eta", "fdchi2", "minpt", "nlt16", "ipchi2", "n1trk", "sumpt"]
"""
Explanation: Define variables
End of explanation
"""
# hlt2 nbody selection
signal_data = signal_data[(signal_data['pass_nbody'] == 1) & (signal_data['mcor'] <= 10e3)]
bck_data = bck_data[(bck_data['pass_nbody'] == 1) & (bck_data['mcor'] <= 10e3)]
print 'Signal', statistic_length(signal_data)
print 'Bck', statistic_length(bck_data)
total_signal_events_by_mode_presel = dict()
for mode in sig_train_modes_names:
total_signal_events_by_mode_presel[mode] = statistic_length(signal_data[signal_data['mode'] == mode])['Events']
total_bck_events_presel = statistic_length(bck_data)['Events']
"""
Explanation: Counting events and svrs
which passed pass_nbody (equivalent to Mike's preselections for the nbody selection)
End of explanation
"""
print 'Bck:', total_bck_events_presel
'Signal:', total_signal_events_by_mode_presel
signal_data.head()
"""
Explanation: Event distribution by mode
End of explanation
"""
ds_train_signal, ds_train_bck, ds_test_signal, ds_test_bck = prepare_data(signal_data, bck_data, 'unique')
"""
Explanation: Prepare train/test splitting
Divide events which passed all preselections into two equal parts randomly
End of explanation
"""
print 'Signal', statistic_length(ds_train_signal)
print 'Bck', statistic_length(ds_train_bck)
train = pandas.concat([ds_train_bck, ds_train_signal])
"""
Explanation: train: counting events and svrs
End of explanation
"""
print 'Signal', statistic_length(ds_test_signal)
print 'Bck', statistic_length(ds_test_bck)
test = pandas.concat([ds_test_bck, ds_test_signal])
"""
Explanation: test: counting events and svrs
End of explanation
"""
total_test_bck_events = (total_bck_events - total_bck_events_presel) // 2 + statistic_length(ds_test_bck)['Events']
total_test_signal_events = dict()
for mode in sig_train_modes_names:
total_not_passed_signal = total_signal_events_by_mode[mode] - total_signal_events_by_mode_presel[mode]
total_test_signal_events[mode] = total_not_passed_signal // 2 + \
statistic_length(ds_test_signal[ds_test_signal['mode'] == mode])['Events']
print 'Bck total test events:', total_test_bck_events
'Signal total test events:', total_test_signal_events
import cPickle
if os.path.exists('models/prunned.pkl'):
with open('models/prunned.pkl', 'r') as file_pr:
estimators = cPickle.load(file_pr)
"""
Explanation: Define the total number of events in the test samples
(which passed just the L0 and GoodGenB preselections), also counting empty events. Assume that events which didn't pass pass_nbody were likewise divided equally and randomly between the training and test samples
End of explanation
"""
from rep_ef.estimators import MatrixNetSkyGridClassifier
"""
Explanation: Matrixnet training
End of explanation
"""
ef_base = MatrixNetSkyGridClassifier(train_features=variables, user_name='antares',
connection='skygrid',
iterations=5000, sync=False)
ef_base.fit(train, train['signal'])
"""
Explanation: Base model with 5000 trees
End of explanation
"""
special_b = {
'n': [2.5, 3.5],
    'mcor': [2000,3000,4000,5000,7500], # I want to remove splits too close to the B mass, as I was looking in simulation and this could distort the mass peak (possibly)
'chi2': [1,2.5,5,7.5,10,100], # I also propose we add a cut to the pre-selection of chi2 < 1000. I don't want to put in splits at too small values here b/c these type of inputs are never modeled quite right in the simulation (they always look a bit more smeared in data).
'sumpt': [3000,4000,5000,6000,7500,9000,12e3,23e3,50e3], # I am happy with the MN splits here (these are almost "as is" from modify-6)
'eta': [2.5,3,3.75,4.25,4.5], # Close to MN.
'fdchi2': [33,125,350,780,1800,5000,10000], # I want to make the biggest split 10e3 because in the simulated events there is pretty much only BKGD above 40e3 but we don't want the BDT to learn to kill these as new particles would live here. Otherwise I took the MN splits and modified the first one (the first one is 5sigma now).
'minpt': [350,500,750,1500,3000,5000], # let's make 500 the 2nd split so that this lines up with the HLT1 SVs.
'nlt16': [0.5],
'ipchi2': [8,26,62,150,500,1000], # I also propose we add a cut of IP chi2 < 5000 as it's all background out there.
'n1trk': [0.5, 1.5, 2.5, 3.5]
}
ef_base_bbdt = MatrixNetSkyGridClassifier(train_features=variables, user_name='antares',
connection='skygrid',
iterations=5000, sync=False, intervals=special_b)
ef_base_bbdt.fit(train, train['signal'])
"""
Explanation: Base BBDT model
End of explanation
"""
ef_base_bbdt5 = MatrixNetSkyGridClassifier(train_features=variables, user_name='antares',
connection='skygrid',
iterations=5000, sync=False, intervals=5)
ef_base_bbdt5.fit(train, train['signal'])
ef_base_bbdt6 = MatrixNetSkyGridClassifier(train_features=variables, user_name='antares',
connection='skygrid',
iterations=5000, sync=False, intervals=6)
ef_base_bbdt6.fit(train, train['signal'])
"""
Explanation: BBDT-5, 6
End of explanation
"""
from rep.data import LabeledDataStorage
from rep.report import ClassificationReport
report = ClassificationReport({'base': ef_base}, LabeledDataStorage(test, test['signal']))
report.roc()
"""
Explanation: Pruning
End of explanation
"""
%run pruning.py
"""
Explanation: Minimize log_loss
(also known as BinomialDeviance)
End of explanation
"""
new_trainlen = (len(train) // 8) * 8
trainX = train[ef_base.features][:new_trainlen].values
trainY = train['signal'][:new_trainlen].values
trainW = numpy.ones(len(trainY))
trainW[trainY == 0] *= sum(trainY) / sum(1 - trainY)
new_features, new_formula_mx, new_classifier = select_trees(trainX, trainY, sample_weight=trainW,
initial_classifier=ef_base,
iterations=100, n_candidates=100,
learning_rate=0.1, regularization=50.)
prunned = cPickle.loads(cPickle.dumps(ef_base))
prunned.formula_mx = new_formula_mx
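# Train one folding classifier per signal channel (each channel against the background
# mode) and pickle the resulting dictionary of per-channel models to model_file.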
def mode_scheme_fit(train, base, suf, model_file):
blending_parts = OrderedDict()
for n_ch, ch in enumerate(sig_train_modes_names):
temp = FoldingClassifier(base_estimator=base, random_state=11, features=variables, ipc_profile=PROFILE)
temp_data = train[(train['mode'] == ch) | (train['mode'] == bck_train_mode_name)]
temp.fit(temp_data, temp_data['signal'])
blending_parts['ch' + str(n_ch) + suf] = temp
import cPickle
with open(model_file, 'w') as f:
cPickle.dump(blending_parts, f)
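# Load the per-channel classifiers and add one prediction column ('ch<i><suf>')
# per signal channel to the given dataset.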
def mode_scheme_predict(data, suf, model_file, mode='train'):
with open(model_file, 'r') as f:
blending_parts = cPickle.load(f)
for n_ch, ch in enumerate(sig_train_modes_names):
temp_name = 'ch' + str(n_ch) + suf
if mode == 'train':
temp_key = ((data['mode'] == ch) | (data['mode'] == bck_train_mode_name))
data.ix[temp_key, temp_name] = blending_parts[temp_name].predict_proba(
data[temp_key])[:, 1]
data.ix[~temp_key, temp_name] = blending_parts[temp_name].predict_proba(
data[~temp_key])[:, 1]
else:
data[temp_name] = blending_parts[temp_name].predict_proba(data)[:, 1]
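# For each signal channel, keep only the `count` highest-scoring SVRs per event
# (events are grouped by the 'unique' column), then concatenate them with all
# background SVRs.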
def get_best_svr_by_channel(data, feature_mask, count=1):
add_events = []
for id_est, channel in enumerate(sig_train_modes_names):
train_part = data[(data['mode'] == channel)]
for num, group in train_part.groupby('unique'):
index = numpy.argsort(group[feature_mask.format(id_est)].values)[::-1]
add_events.append(group.iloc[index[:count], :])
good_events = pandas.concat([data[(data['mode'] == bck_train_mode_name)]] + add_events)
print len(good_events)
return good_events
from sklearn.ensemble import RandomForestClassifier
from rep.metaml import FoldingClassifier
base = RandomForestClassifier(n_estimators=500, min_samples_leaf=50, max_depth=6,
max_features=7, n_jobs=8)
mode_scheme_fit(train, base, '', 'forest_trick.pkl')
mode_scheme_predict(train, '', 'forest_trick.pkl')
mode_scheme_predict(test, '', 'forest_trick.pkl', mode='test')
good_events = get_best_svr_by_channel(train, 'ch{}', 2)
forest_mn = MatrixNetSkyGridClassifier(train_features=variables,
user_name='antares',
connection='skygrid',
iterations=5000, sync=False)
forest_mn.fit(good_events, good_events['signal'])
forest_mn_bbdt = MatrixNetSkyGridClassifier(train_features=variables,
user_name='antares',
connection='skygrid',
iterations=5000, sync=False, intervals=special_b)
forest_mn_bbdt.fit(good_events, good_events['signal'])
new_trainlen = (len(good_events) // 8) * 8
trainX = good_events[forest_mn.features][:new_trainlen].values
trainY = good_events['signal'][:new_trainlen].values
trainW = numpy.ones(len(trainY))
trainW[trainY == 0] *= sum(trainY) / sum(1 - trainY)
len(train), len(good_events)
new_features_f, new_formula_mx_f, new_classifier_f = select_trees(trainX, trainY, sample_weight=trainW,
initial_classifier=forest_mn,
iterations=100, n_candidates=100,
learning_rate=0.1, regularization=50.)
prunned_f = cPickle.loads(cPickle.dumps(forest_mn))
prunned_f.formula_mx = new_formula_mx_f
estimators = {'base MN': ef_base, 'BBDT MN-6': ef_base_bbdt6, 'BBDT MN-5': ef_base_bbdt5,
'BBDT MN special': ef_base_bbdt,
'Prunned MN': prunned, 'base MN + forest': forest_mn,
'BBDT MN special + forest': forest_mn_bbdt, 'Prunned MN + forest': prunned_f}
import cPickle
with open('models/prunned.pkl', 'w') as file_pr:
cPickle.dump(estimators, file_pr)
"""
Explanation: The training sample is truncated so that its length is a multiple of 8
End of explanation
"""
thresholds = dict()
test_bck = test[test['signal'] == 0]
RATE = [2500., 4000.]
events_pass = dict()
for name, cl in estimators.items():
prob = cl.predict_proba(test_bck)
thr, result = calculate_thresholds(test_bck, prob, total_test_bck_events, rates=RATE)
for rate, val in result.items():
events_pass['{}-{}'.format(rate, name)] = val[1]
thresholds[name] = thr
print name, result
"""
Explanation: Calculate thresholds on classifiers
End of explanation
"""
train_modes_eff, statistic = result_statistic(estimators, sig_train_modes_names,
test[test['signal'] == 1],
thresholds, RATE, total_test_signal_events)
from rep.plotting import BarComparePlot
xticks_labels = ['$B^0 \\to K^*\mu^+\mu^-$', "$B^0 \\to D^+D^-$", "$B^0 \\to D^- \mu^+ \\nu_{\mu}$",
'$B^+ \\to \pi^+ K^-K^+$', '$B^0_s \\to \psi(1S) K^+K^-\pi^+\pi^-$', '$B^0_s \\to D_s^-\pi^+$']
for r in RATE:
new_dict = []
for key, val in train_modes_eff.iteritems():
if (key[0] in {'base MN', 'Prunned MN', 'BBDT MN special',
'base MN + forest', 'Prunned MN + forest', 'BBDT MN special + forest'}) and r == key[1]:
new_dict.append((key, val))
new_dict = dict(new_dict)
BarComparePlot(new_dict).plot(new_plot=True, figsize=(24, 8), ylabel='efficiency', fontsize=22)
xticks(3 + 11 * numpy.arange(6), xticks_labels, rotation=0)
lgd = legend(bbox_to_anchor=(0.5, 1.3), loc='upper center', ncol=2, fontsize=22)
# plt.savefig('hlt2-experiments.pdf' , format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
from rep.plotting import BarComparePlot
for r in RATE:
new_dict = []
for key, val in train_modes_eff.iteritems():
if r == key[1]:
new_dict.append((key, val))
new_dict = dict(new_dict)
BarComparePlot(new_dict).plot(new_plot=True, figsize=(24, 8), ylabel='efficiency', fontsize=22)
lgd = legend(bbox_to_anchor=(0.5, 1.3), loc='upper center', ncol=2, fontsize=22)
# plt.savefig('hlt2-experiments.pdf' , format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
"""
Explanation: Final efficiencies for each mode
End of explanation
"""
plots = OrderedDict()
for key, value in estimators.items():
plots[key] = plot_roc_events(value, test[test['signal'] == 1], test[test['signal'] == 0], key)
bbdt_plots = plots.copy()
bbdt_plots.pop('Prunned MN')
bbdt_plots.pop('Prunned MN + forest')
from rep.plotting import FunctionsPlot
FunctionsPlot(bbdt_plots).plot(new_plot=True, xlim=(0.02, 0.06), ylim=(0.65, 0.82))
plot([1. * events_pass['2500.0-base MN'] / statistic_length(ds_test_bck)['Events']] * 2,
[0., 1], 'b--', label='rate: 2.5 kHz')
plot([1. * events_pass['4000.0-base MN'] / statistic_length(ds_test_bck)['Events']] * 2,
[0., 1], 'g--', label='rate: 4. kHz')
lgd = legend(loc='upper center', fontsize=16, bbox_to_anchor=(0.5, 1.3), ncol=3)
title('ROC for events (training decays)', fontsize=20)
xlabel('FPR, background events efficiency', fontsize=20)
ylabel('TPR, signal events efficiency', fontsize=20)
from rep.plotting import FunctionsPlot
FunctionsPlot(plots).plot(new_plot=True, xlim=(0.02, 0.06), ylim=(0.65, 0.82))
plot([1. * events_pass['2500.0-base MN'] / statistic_length(ds_test_bck)['Events']] * 2,
[0., 1], 'b--', label='rate: 2.5 kHz')
plot([1. * events_pass['4000.0-base MN'] / statistic_length(ds_test_bck)['Events']] * 2,
[0., 1], 'g--', label='rate: 4. kHz')
lgd = legend(loc='upper center', fontsize=16, bbox_to_anchor=(0.5, 1.4), ncol=3)
title('ROC for events (training decays)', fontsize=20)
xlabel('FPR, background events efficiency', fontsize=20)
ylabel('TPR, signal events efficiency', fontsize=20)
"""
Explanation: Classification report using events
End of explanation
"""
from collections import defaultdict
all_channels = []
efficiencies = defaultdict(OrderedDict)
for mode in empty_events.keys():
if mode in set(sig_train_modes_names) or mode == bck_train_mode_name:
continue
df = pandas.read_csv(os.path.join(folder , 'mod_{}.csv'.format(mode)), sep='\t')
if len(df) <= 0:
continue
total_events = statistic_length(df)['Events'] + empty_events[mode]
df = df[(df['pass_nbody'] == 1) & (df['mcor'] <= 10e3)]
passed_events = statistic_length(df)['Events']
all_channels.append(df)
for name, cl in estimators.items():
prob = cl.predict_proba(df)
for rate, thresh in thresholds[name].items():
eff = final_eff_for_mode(df, prob, total_events, thresh)
latex_name = '$' + Samples[str(mode)]['root'].replace("#", "\\") + '$'
efficiencies[(name, rate)][latex_name] = eff
for key, val in efficiencies.items():
for key_2, val_2 in val.items():
if val_2 <= 0.1:
efficiencies[key].pop(key_2)
from rep.plotting import BarComparePlot
for r in RATE:
new_dict = []
for key, val in efficiencies.iteritems():
if r == key[1]:
new_dict.append((key, val))
new_dict = dict(new_dict)
BarComparePlot(new_dict).plot(new_plot=True, figsize=(24, 8), ylabel='efficiency', fontsize=22)
lgd = legend(bbox_to_anchor=(0.5, 1.4), loc='upper center', ncol=2, fontsize=22)
plots_all = OrderedDict()
for key, value in estimators.items():
plots_all[key] = plot_roc_events(value, pandas.concat([test[test['signal'] == 1]] + all_channels),
test[test['signal'] == 0], key)
from rep.plotting import FunctionsPlot
FunctionsPlot(plots_all).plot(new_plot=True, xlim=(0.02, 0.06), ylim=(0.5, 0.66))
plot([1. * events_pass['2500.0-base MN'] / statistic_length(ds_test_bck)['Events']] * 2,
[0., 1], 'b--', label='rate: 2.5 kHz')
plot([1. * events_pass['4000.0-base MN'] / statistic_length(ds_test_bck)['Events']] * 2,
[0., 1], 'g--', label='rate: 4. kHz')
lgd = legend(loc='upper center', fontsize=16, bbox_to_anchor=(0.5, 1.3), ncol=4)
title('ROC for events (all decays together)', fontsize=20)
xlabel('FPR, background events efficiency', fontsize=20)
ylabel('TPR, signal events efficiency', fontsize=20)
"""
Explanation: Efficiencies for all channels
End of explanation
"""
thresholds = OrderedDict()
RATE = [2000., 2500., 3000., 3500., 4000.]
for name, cl in estimators.items():
prob = cl.predict_proba(ds_test_bck)
thr, result = calculate_thresholds(ds_test_bck, prob, total_test_bck_events, rates=RATE)
thresholds[name] = thr
print name, result
train_modes_eff, statistic = result_statistic({'base MN': estimators['base MN']}, sig_train_modes_names,
test[test['signal'] == 1],
thresholds, RATE, total_test_signal_events)
order_rate = OrderedDict()
for j in numpy.argsort([i[1] for i in train_modes_eff.keys()]):
order_rate[train_modes_eff.keys()[j]] = train_modes_eff.values()[j]
from rep.plotting import BarComparePlot
BarComparePlot(order_rate).plot(new_plot=True, figsize=(18, 6), ylabel='efficiency', fontsize=18)
lgd = legend(bbox_to_anchor=(0.5, 1.2), loc='upper center', ncol=5, fontsize=18)
# plt.savefig('rates.pdf' , format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
"""
Explanation: Different rates
End of explanation
"""
|
Kaggle/learntools | notebooks/data_cleaning/raw/ex5.ipynb | apache-2.0 | from learntools.core import binder
binder.bind(globals())
from learntools.data_cleaning.ex5 import *
print("Setup Complete")
"""
Explanation: In this exercise, you'll apply what you learned in the Inconsistent data entry tutorial.
Setup
The questions below will give you feedback on your work. Run the following cell to set up the feedback system.
End of explanation
"""
# modules we'll use
import pandas as pd
import numpy as np
# helpful modules
import fuzzywuzzy
from fuzzywuzzy import process
import chardet
# read in all our data
professors = pd.read_csv("../input/pakistan-intellectual-capital/pakistan_intellectual_capital.csv")
# set seed for reproducibility
np.random.seed(0)
"""
Explanation: Get our environment set up
The first thing we'll need to do is load in the libraries and dataset we'll be using. We use the same dataset from the tutorial.
End of explanation
"""
# convert to lower case
professors['Country'] = professors['Country'].str.lower()
# remove trailing white spaces
professors['Country'] = professors['Country'].str.strip()
# get the top 10 closest matches to "south korea"
countries = professors['Country'].unique()
matches = fuzzywuzzy.process.extract("south korea", countries, limit=10, scorer=fuzzywuzzy.fuzz.token_sort_ratio)
def replace_matches_in_column(df, column, string_to_match, min_ratio = 47):
# get a list of unique strings
strings = df[column].unique()
# get the top 10 closest matches to our input string
matches = fuzzywuzzy.process.extract(string_to_match, strings,
limit=10, scorer=fuzzywuzzy.fuzz.token_sort_ratio)
    # only keep matches with a ratio >= min_ratio (47 by default)
    close_matches = [match[0] for match in matches if match[1] >= min_ratio]
# get the rows of all the close matches in our dataframe
rows_with_matches = df[column].isin(close_matches)
# replace all rows with close matches with the input matches
df.loc[rows_with_matches, column] = string_to_match
# let us know the function's done
print("All done!")
replace_matches_in_column(df=professors, column='Country', string_to_match="south korea")
countries = professors['Country'].unique()
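# As a quick illustration of the scorer used above: token_sort_ratio compares two
# strings after sorting their tokens and returns a similarity score from 0 to 100.
print(fuzzywuzzy.fuzz.token_sort_ratio("south korea", "southkorea"))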
"""
Explanation: Next, we'll redo all of the work that we did in the tutorial.
End of explanation
"""
# TODO: Your code here
#%%RM_IF(PROD)%%
unis = professors['Graduated from'].unique()
# sort them alphabetically and then take a closer look
unis.sort()
unis
"""
Explanation: 1) Examine another column
Write code below to take a look at all the unique values in the "Graduated from" column.
End of explanation
"""
# Check your answer (Run this code cell to receive credit!)
q1.check()
# Line below will give you a hint
#_COMMENT_IF(PROD)_
q1.hint()
"""
Explanation: Do you notice any inconsistencies in the data? Can any of the inconsistencies in the data be fixed by removing white spaces at the beginning and end of cells?
Once you have answered these questions, run the code cell below to get credit for your work.
End of explanation
"""
# TODO: Your code here
____
# Check your answer
q2.check()
#%%RM_IF(PROD)%%
q2.assert_check_failed()
#%%RM_IF(PROD)%%
professors['Graduated from'] = professors['Graduated from'].str.strip()
q2.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q2.hint()
#_COMMENT_IF(PROD)_
q2.solution()
"""
Explanation: 2) Do some text pre-processing
Convert every entry in the "Graduated from" column in the professors DataFrame to remove white spaces at the beginning and end of cells.
End of explanation
"""
# get all the unique values in the 'Country' column
countries = professors['Country'].unique()
# sort them alphabetically and then take a closer look
countries.sort()
countries
"""
Explanation: 3) Continue working with countries
In the tutorial, we focused on cleaning up inconsistencies in the "Country" column. Run the code cell below to view the list of unique values that we ended with.
End of explanation
"""
# TODO: Your code here!
____
# Check your answer
q3.check()
#%%RM_IF(PROD)%%
q3.assert_check_failed()
#%%RM_IF(PROD)%%
matches = fuzzywuzzy.process.extract("usa", countries, limit=10, scorer=fuzzywuzzy.fuzz.token_sort_ratio)
replace_matches_in_column(df=professors, column='Country', string_to_match="usa", min_ratio=70)
#q3.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q3.hint()
#_COMMENT_IF(PROD)_
q3.solution()
"""
Explanation: Take another look at the "Country" column and see if there's any more data cleaning we need to do.
It looks like 'usa' and 'usofa' should be the same country. Correct the "Country" column in the dataframe to replace 'usofa' with 'usa'.
Use the most recent version of the DataFrame (with the whitespaces at the beginning and end of cells removed) from question 2.
End of explanation
"""
|
bmorris3/gsoc2015 | landolt_standards_recipe.ipynb | mit | catalog_name = 'Landolt 1992'
observatory_name = 'Apache Point'
from astroquery.vizier import Vizier
from astropy.coordinates import SkyCoord
import astropy.units as u
catalog_list = Vizier.find_catalogs(catalog_name)
Vizier.ROW_LIMIT = -1 # Otherwise only the first 50 rows would be returned
catalogs = Vizier.get_catalogs(catalog_list.keys())
catalog_table = catalogs[0] # This is the table with the data
RAs = u.Quantity(catalog_table['_RAJ2000'].data, unit=u.deg)
Decs = u.Quantity(catalog_table['_DEJ2000'].data, unit=u.deg)
names = list(catalog_table['SimbadName'].data)
landolt_standards = SkyCoord(ra=RAs, dec=Decs)
"""
Explanation: Which Landolt (1992) standard stars are visible tonight?
Use astroquery to get a table of Landolt (1992) standard stars from Vizier.
End of explanation
"""
from astroplan import Observer, FixedTarget
obs = Observer.at_site(observatory_name)
target_list = [FixedTarget(coord=coord, name=name)
for coord, name in zip(landolt_standards, names)]
"""
Explanation: Set up an Observer and list of FixedTargets in astroplan.
End of explanation
"""
from astroplan import is_observable, observability_table, AltitudeConstraint, AtNightConstraint
from astropy.time import Time
constraints = [AltitudeConstraint(min=25*u.deg),
AtNightConstraint.twilight_astronomical()]
# Figure out when "tonight" is
present_time = Time.now()
if not obs.is_night(present_time):
# If it's currently day time at runtime, find time of sunset and sunrise
tonight_start = obs.sun_set_time(present_time, which='next')
tonight_end = obs.sun_rise_time(present_time, which='next')
else:
# Otherwise find time to next sunrise
tonight_start = present_time
tonight_end = obs.sun_rise_time(present_time, which='next')
table = observability_table(constraints, obs, target_list,
time_range=Time([tonight_start, tonight_end]))
print(table)
"""
Explanation: Determine which standards are observable tonight.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/uhh/cmip6/models/sandbox-2/land.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'uhh', 'sandbox-2', 'land')
"""
Explanation: ES-DOC CMIP6 Model Properties - Land
MIP Era: CMIP6
Institute: UHH
Source ID: SANDBOX-2
Topic: Land
Sub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes.
Properties: 154 (96 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:41
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Conservation Properties
3. Key Properties --> Timestepping Framework
4. Key Properties --> Software Properties
5. Grid
6. Grid --> Horizontal
7. Grid --> Vertical
8. Soil
9. Soil --> Soil Map
10. Soil --> Snow Free Albedo
11. Soil --> Hydrology
12. Soil --> Hydrology --> Freezing
13. Soil --> Hydrology --> Drainage
14. Soil --> Heat Treatment
15. Snow
16. Snow --> Snow Albedo
17. Vegetation
18. Energy Balance
19. Carbon Cycle
20. Carbon Cycle --> Vegetation
21. Carbon Cycle --> Vegetation --> Photosynthesis
22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
23. Carbon Cycle --> Vegetation --> Allocation
24. Carbon Cycle --> Vegetation --> Phenology
25. Carbon Cycle --> Vegetation --> Mortality
26. Carbon Cycle --> Litter
27. Carbon Cycle --> Soil
28. Carbon Cycle --> Permafrost Carbon
29. Nitrogen Cycle
30. River Routing
31. River Routing --> Oceanic Discharge
32. Lakes
33. Lakes --> Method
34. Lakes --> Wetlands
1. Key Properties
Land surface key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code (e.g. MOSES2.2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.3. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the processes modelled (e.g. dynamic vegetation, prognostic albedo, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Land Atmosphere Flux Exchanges
Is Required: FALSE Type: ENUM Cardinality: 0.N
Fluxes exchanged with the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Atmospheric Coupling Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Land Cover
Is Required: TRUE Type: ENUM Cardinality: 1.N
Types of land cover defined in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Land Cover Change
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how land cover change is managed (e.g. the use of net or gross transitions)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Tiling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Conservation Properties
TODO
2.1. Energy
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how energy is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Water
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how water is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Carbon
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping Framework
TODO
3.1. Timestep Dependent On Atmosphere
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a time step dependent on the frequency of atmosphere coupling?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Overall timestep of land surface model (i.e. time between calls)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestepping Method
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of time stepping method and associated time step(s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of land surface code
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid
Land surface grid
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Horizontal
The horizontal grid in the land surface
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the horizontal grid (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the horizontal grid match the atmosphere?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Vertical
The vertical grid in the soil
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the vertical grid in the soil (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.2. Total Depth
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The total depth of the soil (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Soil
Land surface soil
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of soil in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Heat Water Coupling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the coupling between heat and water in the soil
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 8.3. Number Of Soil layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the soil scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Soil --> Soil Map
Key properties of the land surface soil map
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of soil map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil structure map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Texture
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil texture map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.4. Organic Matter
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil organic matter map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.5. Albedo
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil albedo map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Water Table
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil water table map, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.7. Continuously Varying Soil Depth
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Do the soil properties vary continuously with depth?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.8. Soil Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil depth map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10. Soil --> Snow Free Albedo
TODO
10.1. Prognostic
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow free albedo prognostic?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, describe the dependencies of the snow free albedo calculations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Direct Diffuse
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe the distinction between direct and diffuse albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.4. Number Of Wavelength Bands
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If prognostic, enter the number of wavelength bands used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Soil --> Hydrology
Key properties of the land surface soil hydrology
11.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the soil hydrological model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of the soil hydrology scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil hydrology tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.5. Number Of Ground Water Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers that may contain water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.6. Lateral Connectivity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe the lateral connectivity between tiles
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.7. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
The hydrological dynamics scheme in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12. Soil --> Hydrology --> Freezing
TODO
12.1. Number Of Ground Ice Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
How many soil layers may contain ground ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Ice Storage Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of ice storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Permafrost
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of permafrost, if any, within the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Soil --> Hydrology --> Drainage
TODO
13.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how drainage is included in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
Different types of runoff represented by the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Soil --> Heat Treatment
TODO
14.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how heat treatment properties are defined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of soil heat scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil heat treatment tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.5. Heat Storage
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the method of heat storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.6. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe processes included in the treatment of soil heat
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Snow
Land surface snow
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of snow in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.3. Number Of Snow Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of snow levels used in the land surface scheme/model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Density
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow density
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.5. Water Equivalent
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the snow water equivalent
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.6. Heat Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the heat content of snow
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.7. Temperature
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow temperature
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.8. Liquid Water Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow liquid water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.9. Snow Cover Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify cover fractions used in the surface snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.10. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Snow related processes in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.11. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Snow --> Snow Albedo
TODO
16.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of snow-covered land albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
*If prognostic, describe the dependencies of the snow albedo calculations*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Vegetation
Land surface vegetation
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vegetation in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of vegetation scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.3. Dynamic Vegetation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there dynamic evolution of vegetation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.4. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vegetation tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.5. Vegetation Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Vegetation classification used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.6. Vegetation Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of vegetation types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.7. Biome Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of biome types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.8. Vegetation Time Variation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How the vegetation fractions in each tile vary with time
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.9. Vegetation Map
Is Required: FALSE Type: STRING Cardinality: 0.1
If vegetation fractions are not dynamically updated, describe the vegetation map used (common name and reference, if possible)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.10. Interception
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is vegetation interception of rainwater represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.11. Phenology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.12. Phenology Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.13. Leaf Area Index
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.14. Leaf Area Index Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.15. Biomass
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation biomass
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.16. Biomass Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biomass
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.17. Biogeography
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.18. Biogeography Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.19. Stomatal Resistance
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify what the vegetation stomatal resistance depends on
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.20. Stomatal Resistance Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation stomatal resistance
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.21. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the vegetation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18. Energy Balance
Land surface energy balance
18.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of energy balance in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the energy balance tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.3. Number Of Surface Temperatures
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.4. Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify the formulation method for land surface evaporation, from soil and vegetation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe which processes are included in the energy balance scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19. Carbon Cycle
Land surface carbon cycle
19.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of carbon cycle in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the carbon cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of carbon cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.4. Anthropogenic Carbon
Is Required: FALSE Type: ENUM Cardinality: 0.N
Describe the treatment of the anthropogenic carbon pool
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.5. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the carbon scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 20. Carbon Cycle --> Vegetation
TODO
20.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.3. Forest Stand Dynamics
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of forest stand dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Carbon Cycle --> Vegetation --> Photosynthesis
TODO
21.1. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen dependence, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
TODO
22.1. Maintainance Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for maintenance respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Growth Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for growth respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23. Carbon Cycle --> Vegetation --> Allocation
TODO
23.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the allocation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Allocation Bins
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify distinct carbon bins used in allocation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Allocation Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how the fractions of allocation are calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24. Carbon Cycle --> Vegetation --> Phenology
TODO
24.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the phenology scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25. Carbon Cycle --> Vegetation --> Mortality
TODO
25.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the mortality scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26. Carbon Cycle --> Litter
TODO
26.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27. Carbon Cycle --> Soil
TODO
27.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28. Carbon Cycle --> Permafrost Carbon
TODO
28.1. Is Permafrost Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is permafrost included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.2. Emitted Greenhouse Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
List the GHGs emitted
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.4. Impact On Soil Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the impact of permafrost on soil properties
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Nitrogen Cycle
Land surface nitrogen cycle
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the nitrogen cycle in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the nitrogen cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 29.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of nitrogen cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the nitrogen scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30. River Routing
Land surface river routing
30.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of river routing in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the river routing tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river routing scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Grid Inherited From Land Surface
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the grid inherited from land surface?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.5. Grid Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of grid, if not inherited from land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.6. Number Of Reservoirs
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of reservoirs
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.7. Water Re Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
TODO
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.8. Coupled To Atmosphere
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Is river routing coupled to the atmosphere model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.9. Coupled To Land
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the coupling between land and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.10. Quantities Exchanged With Atmosphere
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupled to the atmosphere, which quantities are exchanged between river routing and the atmosphere model components?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.11. Basin Flow Direction Map
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of basin flow direction map is being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.12. Flooding
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the representation of flooding, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.13. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the river routing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. River Routing --> Oceanic Discharge
TODO
31.1. Discharge Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify how rivers are discharged to the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Quantities Transported
Is Required: TRUE Type: ENUM Cardinality: 1.N
Quantities that are exchanged from river-routing to the ocean model component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Lakes
Land surface lakes
32.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lakes in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.2. Coupling With Rivers
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are lakes coupled to the river routing model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 32.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of lake scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Quantities Exchanged With Rivers
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupling with rivers, which quantities are exchanged between the lakes and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.5. Vertical Grid
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vertical grid of lakes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the lake scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33. Lakes --> Method
TODO
33.1. Ice Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is lake ice included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.2. Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of lake albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.3. Dynamics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which dynamics of lakes are treated? horizontal, vertical, etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.4. Dynamic Lake Extent
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a dynamic lake extent scheme included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.5. Endorheic Basins
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Basins not flowing to ocean included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Lakes --> Wetlands
TODO
34.1. Description
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of wetlands, if any
End of explanation
"""
|
AlexanderAda/NioGuardSecurityLab | Courses/ML in Cybersecurity/Classification/Phishing detection/Phishing detection with ML and feature scaling.ipynb | mit | # Load CSV
import pandas as pd
import numpy as np
filename = 'Examples - Phishing clasification2.csv'
# Specify the names of attributes if the header is not available in a CSV file
#names = ['Registrar', 'Lifetime', 'Country', 'Class']
# Loading with NumPy
#raw_data = open(filename, 'rt')
#data = numpy.loadtxt(raw_data, delimiter=",")
# Loading with Pandas
data = pd.read_csv(filename)
print(data.shape)
#data
#data.dtypes
# Transforming 'object' data to 'categorical' to get numerical (ordinal numbers) representation
data['Registrar'] = data['Registrar'].astype('category')
data['Country'] = data['Country'].astype('category')
data['Protocol'] = data['Protocol'].astype('category')
data['Class'] = data['Class'].astype('category')
data['Registrar_code'] = data['Registrar'].cat.codes
data['Country_code'] = data['Country'].cat.codes
data['Protocol_code'] = data['Protocol'].cat.codes
data['Class_code'] = data['Class'].cat.codes
#data['Lifetime'] = data['Lifetime']*100
#data.dtypes
#pd.options.display.max_rows=1000
data
#pd.options.display.max_rows=100
from sklearn.preprocessing import MinMaxScaler
min_max_scaler = MinMaxScaler()
X_raw = data[['Registrar_code', 'Lifetime', 'Country_code', 'Protocol_code']].values #Feature Matrix
X = min_max_scaler.fit_transform(X_raw)
y = data['Class_code'].values #Target Variable
feature_names = data[['Registrar_code', 'Lifetime', 'Country_code', 'Protocol_code']].columns.values
#print(feature_names)
target_names = data['Class'].cat.categories
country_names = data['Country'].cat.categories
registrar_names = data['Registrar'].cat.categories
protocol_names = data['Protocol'].cat.categories
#print(target_names, country_names, registrar_names)
import matplotlib.pyplot as plt
x_index = 1
y_index = 3
# this formatter will label the colorbar with the correct target names
formatter = plt.FuncFormatter(lambda i, *args: target_names[int(i)])
plt.scatter(X[:, x_index], X[:, y_index], c=y, cmap=plt.cm.get_cmap('Paired', 2))
plt.colorbar(ticks=[0, 1], format=formatter)
plt.clim(-0.5, 1.5)
plt.xlabel(feature_names[x_index])
plt.ylabel(feature_names[y_index]);
plt.show()
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(1, figsize=(10, 8))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, 1, 1], elev=30, azim=100)
ax.scatter(X[:, 1], X[:, 2], X[:, 3], lw=2, c=y, cmap='Paired')
ax.set_xlabel(feature_names[1])
ax.set_ylabel(feature_names[2]);
ax.set_zlabel(feature_names[3]);
plt.show()
from sklearn import neighbors
# create the model
knn = neighbors.KNeighborsClassifier(n_neighbors=5)
# fit the model
knn.fit(X, y)
# call the "predict" method:
registrar_code = 48
lifetime = 2
country_code = 28
protocol_code = 1
# The model was fitted on min-max scaled features, so scale the query point with the same scaler before predicting
result = knn.predict(min_max_scaler.transform([[registrar_code, lifetime, country_code, protocol_code]]))
#print(target_names)
print(result, target_names[result[0]], ": ", registrar_names[registrar_code], lifetime, country_names[country_code], protocol_names[protocol_code] )
from matplotlib.colors import ListedColormap
n_neighbors = 5
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['cyan', 'red'])
cmap_bold = ListedColormap(['blue', 'orange'])
# Get '1: Lifetime' and '2: Country' attributes only
x_index = 1
y_index = 2
X2 = X[:,[x_index, y_index]]
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
knn = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
knn.fit(X2, y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X2[:, 0].min() - 1, X2[:, 0].max() + 1
y_min, y_max = X2[:, 1].min() - 1, X2[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X2[:, 0], X2[:, 1], c=y, cmap=cmap_bold,
edgecolor='k', s=20)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("2-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.xlabel(feature_names[x_index])
plt.ylabel(feature_names[y_index]);
plt.show()
"""
Explanation: Classification of phishing and benign URLs
Loading dataset from CSV file
Data exploration with 2D and 3D plots
Classification with KNN
Drawing a boundary between classes with KNN
Dimensionality reduction with PCA and t-SNE
Clustering with k-Means
End of explanation
"""
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(X)
X_reduced = pca.transform(X)
print("Reduced dataset shape:", X_reduced.shape)
# PCA only
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, cmap='Paired')
print("Meaning of the components:")
for component in pca.components_:
print(" + ".join("%.3f x %s" % (value, name)
for value, name in zip(component, feature_names)))
"""
Explanation: Dimensionality reduction with PCA
End of explanation
"""
from sklearn.manifold import TSNE
X_reduced2 = TSNE(n_components=2).fit_transform(X)
# PCA + t-SNE
X_reduced3 = TSNE(n_components=2).fit_transform(X_reduced)
print("Reduced dataset shape:", X_reduced3.shape)
# t-SNE only
plt.scatter(X_reduced2[:, 0], X_reduced2[:, 1], c=y, cmap='Paired')
# PCA + t-SNE
plt.scatter(X_reduced3[:, 0], X_reduced3[:, 1], c=y, cmap='Paired')
"""
Explanation: Dimensionality reduction with t-SNE
End of explanation
"""
from sklearn.cluster import KMeans
k_means = KMeans(n_clusters=2, random_state=0) # Fixing the RNG in kmeans
k_means.fit(X)
y_pred = k_means.predict(X)
plt.scatter(X_reduced2[:, 0], X_reduced2[:, 1], c=y_pred, cmap='Paired');
TP = 0
TN = 0
FP = 0
FN = 0
for i in range (0, len(y)):
#print(i, ":", y[i])
if (y[i] == 1): # Positive
if (y[i] == y_pred[i]):
TP+=1
else:
FN+=1
else:
if (y[i] == y_pred[i]):
TN+=1
else:
FP+=1
print("TP =", TP, "TN =", TN, "FP =", FP, "FN =", FN)
TPR = TP / (TP+FN)
TNR = TN / (TN+FP)
FPR = FP / (FP+TN)
FNR = FN / (TP+FN)
PPV = TP / (TP+FP)  # precision (positive predictive value)
NPV = TN / (TN+FN)
Fmeasure = 2*PPV*TPR / (PPV + TPR)
print("TPR =", TPR, "TNR =", TNR, "FPR =", FPR, "FNR =", FNR, "PPV =", PPV, "NPV =", NPV, "F-measure =", Fmeasure)
"""
Explanation: Clustering: K-means
End of explanation
"""
|
oasis-open/cti-python-stix2 | docs/guide/markings.ipynb | bsd-3-clause | from stix2 import Indicator, TLP_AMBER
indicator = Indicator(pattern_type="stix",
pattern="[file:hashes.md5 = 'd41d8cd98f00b204e9800998ecf8427e']",
object_marking_refs=TLP_AMBER)
print(indicator.serialize(pretty=True))
"""
Explanation: Data Markings
Creating Objects With Data Markings
To create an object with a (predefined) TLP marking, just provide it as a keyword argument to the constructor. The TLP markings can easily be imported from python-stix2.
End of explanation
"""
from stix2 import MarkingDefinition, StatementMarking
marking_definition = MarkingDefinition(
definition_type="statement",
definition=StatementMarking(statement="Copyright 2017, Example Corp")
)
print(marking_definition.serialize(pretty=True))
"""
Explanation: If you’re creating your own marking (for example, a Statement marking), first create the statement marking:
End of explanation
"""
indicator2 = Indicator(pattern_type="stix",
pattern="[file:hashes.md5 = 'd41d8cd98f00b204e9800998ecf8427e']",
object_marking_refs=marking_definition)
print(indicator2.serialize(pretty=True))
indicator3 = Indicator(pattern_type="stix",
pattern="[file:hashes.md5 = 'd41d8cd98f00b204e9800998ecf8427e']",
object_marking_refs="marking-definition--f88d31f6-486f-44da-b317-01333bde0b82")
print(indicator3.serialize(pretty=True))
"""
Explanation: Then you can add it to an object as it’s being created (passing either the full object or the ID as a keyword argument, like with relationships).
End of explanation
"""
from stix2 import Malware, TLP_WHITE
malware = Malware(name="Poison Ivy",
description="A ransomware related to ...",
is_family=False,
granular_markings=[
{
"selectors": ["description"],
"marking_ref": marking_definition
},
{
"selectors": ["name"],
"marking_ref": TLP_WHITE
}
])
print(malware.serialize(pretty=True))
"""
Explanation: Granular markings work in the same way, except you also need to provide a full granular-marking object (including the selector).
End of explanation
"""
Malware(name="Poison Ivy",
description="A ransomware related to ...",
is_family=False,
granular_markings=[
{
"selectors": ["title"],
"marking_ref": marking_definition
}
])
"""
Explanation: Make sure that the selector is a field that exists and is populated on the object, otherwise this will cause an error:
End of explanation
"""
indicator4 = indicator.add_markings(marking_definition)
print(indicator4.serialize(pretty=True))
"""
Explanation: Adding Data Markings To Existing Objects
Several functions exist to support working with data markings.
Both object markings and granular markings can be added to STIX objects which have already been created.
Note: Doing so will create a new version of the object (note the updated modified time).
End of explanation
"""
indicator5 = indicator4.remove_markings(marking_definition)
print(indicator5.serialize(pretty=True))
"""
Explanation: You can also remove specific markings from STIX objects. This will also create a new version of the object.
End of explanation
"""
from stix2 import TLP_GREEN
indicator6 = indicator5.set_markings([TLP_GREEN, marking_definition])
print(indicator6.serialize(pretty=True))
"""
Explanation: The markings on an object can be replaced with a different set of markings:
End of explanation
"""
indicator7 = indicator5.clear_markings()
print(indicator7.serialize(pretty=True))
"""
Explanation: STIX objects can also be cleared of all markings with clear_markings():
End of explanation
"""
indicator6.get_markings()
"""
Explanation: All of these functions can be used for granular markings by passing in a list of selectors. Note that they will create new versions of the objects.
Evaluating Data Markings
You can get a list of the object markings on a STIX object:
End of explanation
"""
from stix2 import get_markings
get_markings(malware, 'name')
"""
Explanation: To get a list of the granular markings on an object, pass the object and a list of selectors to get_markings():
End of explanation
"""
malware.get_markings('name')
"""
Explanation: You can also call get_markings() as a method on the STIX object.
End of explanation
"""
indicator.is_marked(TLP_AMBER.id)
malware.is_marked(TLP_WHITE.id, 'name')
malware.is_marked(TLP_WHITE.id, 'description')
"""
Explanation: Finally, you may also check if an object is marked by a specific markings. Again, for granular markings, pass in the selector or list of selectors.
End of explanation
"""
from stix2 import Indicator
v21_indicator = Indicator(
description="Una descripcion sobre este indicador",
pattern_type="stix",
pattern="[file:hashes.md5 = 'd41d8cd98f00b204e9800998ecf8427e']",
object_marking_refs=['marking-definition--f88d31f6-486f-44da-b317-01333bde0b82'],
indicator_types=['malware'],
granular_markings=[
{
'selectors': ['description'],
'lang': 'es'
},
{
'selectors': ['description'],
'marking_ref': 'marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da'
}
]
)
print(v21_indicator.serialize(pretty=True))
# Gets both lang and marking_ref markings for 'description'
print(v21_indicator.get_markings('description'))
# Exclude lang markings from results
print(v21_indicator.get_markings('description', lang=False))
# Exclude marking-definition markings from results
print(v21_indicator.get_markings('description', marking_ref=False))
"""
Explanation: Extracting Lang Data Markings or marking-definition Data Markings
If you need a specific kind of marking, you can also filter them using the API. By default, the library will return both types of markings. You can choose between lang=True/False or marking_ref=True/False depending on your use case.
End of explanation
"""
# By default, both types of markings will be removed
print(v21_indicator.clear_markings("description").serialize(pretty=True))
# If lang is False, no lang markings will be removed
print(v21_indicator.clear_markings("description", lang=False).serialize(pretty=True))
# If marking_ref is False, no marking-definition markings will be removed
print(v21_indicator.clear_markings("description", marking_ref=False).serialize(pretty=True))
"""
Explanation: In the same manner, calls to clear_markings and set_markings can also operate on one or both types of markings.
End of explanation
"""
|
bashtage/statsmodels | examples/notebooks/regression_diagnostics.ipynb | bsd-3-clause | %matplotlib inline
from statsmodels.compat import lzip
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
import matplotlib.pyplot as plt
# Load data
url = "https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/HistData/Guerry.csv"
dat = pd.read_csv(url)
# Fit regression model (using the natural log of one of the regressors)
results = smf.ols("Lottery ~ Literacy + np.log(Pop1831)", data=dat).fit()
# Inspect the results
print(results.summary())
"""
Explanation: Regression diagnostics
This example file shows how to use a few of the statsmodels regression diagnostic tests in a real-life context. You can learn about more tests and find out more information about the tests here on the Regression Diagnostics page.
Note that most of the tests described here only return a tuple of numbers, without any annotation. A full description of outputs is always included in the docstring and in the online statsmodels documentation. For presentation purposes, we use the lzip(name, test) construct to pretty-print short descriptions in the examples below.
Estimate a regression model
End of explanation
"""
name = ["Jarque-Bera", "Chi^2 two-tail prob.", "Skew", "Kurtosis"]
test = sms.jarque_bera(results.resid)
lzip(name, test)
"""
Explanation: Normality of the residuals
Jarque-Bera test:
End of explanation
"""
name = ["Chi^2", "Two-tail probability"]
test = sms.omni_normtest(results.resid)
lzip(name, test)
"""
Explanation: Omni test:
End of explanation
"""
from statsmodels.stats.outliers_influence import OLSInfluence
test_class = OLSInfluence(results)
test_class.dfbetas[:5, :]
"""
Explanation: Influence tests
Once created, an object of class OLSInfluence holds attributes and methods that allow users to assess the influence of each observation. For example, we can compute and extract the first few rows of DFbetas by:
End of explanation
"""
from statsmodels.graphics.regressionplots import plot_leverage_resid2
fig, ax = plt.subplots(figsize=(8, 6))
fig = plot_leverage_resid2(results, ax=ax)
"""
Explanation: Explore other options by typing dir(test_class)
Useful information on leverage can also be plotted:
End of explanation
"""
np.linalg.cond(results.model.exog)
"""
Explanation: Other plotting options can be found on the Graphics page.
Multicollinearity
Condition number:
End of explanation
"""
name = ["Lagrange multiplier statistic", "p-value", "f-value", "f p-value"]
test = sms.het_breuschpagan(results.resid, results.model.exog)
lzip(name, test)
"""
Explanation: Heteroskedasticity tests
Breusch-Pagan test:
End of explanation
"""
name = ["F statistic", "p-value"]
test = sms.het_goldfeldquandt(results.resid, results.model.exog)
lzip(name, test)
"""
Explanation: Goldfeld-Quandt test
End of explanation
"""
name = ["t value", "p value"]
test = sms.linear_harvey_collier(results)
lzip(name, test)
"""
Explanation: Linearity
Harvey-Collier multiplier test for the null hypothesis that the linear specification is correct:
End of explanation
"""
|
tanghaibao/goatools | notebooks/goea_nbt3102_all_study_genes.ipynb | bsd-2-clause | # Get http://geneontology.org/ontology/go-basic.obo
from goatools.base import download_go_basic_obo
obo_fname = download_go_basic_obo()
"""
Explanation: Run a GOEA. Print study genes as either IDs symbols
We use data from a 2014 Nature paper:
Computational analysis of cell-to-cell heterogeneity
in single-cell RNA-sequencing data reveals hidden
subpopulations of cells
Note: you must have the Python package, xlrd, installed to run this example.
1. Download Ontologies and Associations
1a. Download Ontologies, if necessary
End of explanation
"""
# Get ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene2go.gz
from goatools.base import download_ncbi_associations
file_gene2go = download_ncbi_associations()
"""
Explanation: 1b. Download Associations, if necessary
End of explanation
"""
from goatools.obo_parser import GODag
obodag = GODag("go-basic.obo")
"""
Explanation: 2. Load Ontologies, Associations and Background gene set
2a. Load Ontologies
End of explanation
"""
from __future__ import print_function
from goatools.anno.genetogo_reader import Gene2GoReader
# Read NCBI's gene2go. Store annotations in a list of namedtuples
objanno = Gene2GoReader(file_gene2go, taxids=[10090])
# Get associations for each branch of the GO DAG (BP, MF, CC)
ns2assoc = objanno.get_ns2assc()
for nspc, id2gos in ns2assoc.items():
print("{NS} {N:,} annotated mouse genes".format(NS=nspc, N=len(id2gos)))
"""
Explanation: 2b. Load Associations
End of explanation
"""
from genes_ncbi_10090_proteincoding import GENEID2NT as GeneID2nt_mus
"""
Explanation: 2c. Load Background gene set
In this example, the background is all mouse protein-coding genes.
Follow the instructions in the background_genes_ncbi notebook to download a set of background population genes from NCBI.
End of explanation
"""
from goatools.goea.go_enrichment_ns import GOEnrichmentStudyNS
goeaobj = GOEnrichmentStudyNS(
GeneID2nt_mus, # List of mouse protein-coding genes
ns2assoc, # geneid/GO associations
obodag, # Ontologies
propagate_counts = False,
alpha = 0.05, # default significance cut-off
        methods = ['fdr_bh']) # default multipletest correction method
"""
Explanation: 3. Initialize a GOEA object
The GOEA object holds the Ontologies, Associations, and background.
Numerous studies can then be run without needing to re-load the above items.
In this case, we only run one GOEA.
End of explanation
"""
# Data will be stored in this variable
import os
geneid2symbol = {}
# Get xlsx filename where data is stored
ROOT = os.path.dirname(os.getcwd()) # go up 1 level from current working directory
din_xlsx = os.path.join(ROOT, "goatools/test_data/nbt_3102/nbt.3102-S4_GeneIDs.xlsx")
# Read data
if os.path.isfile(din_xlsx):
import xlrd
book = xlrd.open_workbook(din_xlsx)
pg = book.sheet_by_index(0)
for r in range(pg.nrows):
symbol, geneid, pval = [pg.cell_value(r, c) for c in range(pg.ncols)]
if geneid:
geneid2symbol[int(geneid)] = symbol
print('READ: {XLSX}'.format(XLSX=din_xlsx))
else:
    raise RuntimeError('CANNOT READ: {XLSX}'.format(XLSX=din_xlsx))
"""
Explanation: 4. Read study genes
~400 genes from the Nature paper supplemental table 4
End of explanation
"""
# 'p_' means "pvalue". 'fdr_bh' is the multipletest method we are currently using.
geneids_study = geneid2symbol.keys()
goea_results_all = goeaobj.run_study(geneids_study)
goea_results_sig = [r for r in goea_results_all if r.p_fdr_bh < 0.05]
"""
Explanation: 5. Run Gene Ontology Enrichment Analysis (GOEA)
You may choose to keep all results or just the significant results. In this example, we choose to keep only the significant results.
End of explanation
"""
goeaobj.wr_xlsx("nbt3102_symbols.xlsx", goea_results_sig, itemid2name=geneid2symbol)
goeaobj.wr_xlsx("nbt3102_geneids.xlsx", goea_results_sig)
"""
Explanation: 6. Write results to an Excel file and to a text file
End of explanation
"""
|
Wx1ng/Python4DataScience.CH | Series_1_Scientific_Python/S1EP3_Pandas.ipynb | cc0-1.0 | import codecs
import requests
import numpy as np
import scipy as sp
import scipy.stats as spstat
import pandas as pd
import datetime
import json
r = requests.get("http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data")
with codecs.open('S1EP3_Iris.txt','w',encoding='utf-8') as f:
f.write(r.text)
with codecs.open('S1EP3_Iris.txt','r',encoding='utf-8') as f:
lines = f.readlines()
for idx,line in enumerate(lines):
print line,
if idx==10:
break
"""
Explanation: Let's Play with the (Giant) Pandas!
Pandas is a Python library for data analysis: http://pandas.pydata.org
API quick reference: http://pandas.pydata.org/pandas-docs/stable/api.html
Built on top of NumPy and SciPy, it adds a large amount of data manipulation functionality.
Statistics, grouping, sorting, and pivot tables can be switched between freely; if you already know your way around relational databases (RDBMS) and Excel, you will find that Pandas matches them and then some!
0. Getting Hands-On: Why Pandas?
What does an ordinary programmer do when handed a dataset?
End of explanation
"""
import pandas as pd
irisdata = pd.read_csv('S1EP3_Iris.txt',header = None, encoding='utf-8')
irisdata
"""
Explanation: The point of Pandas is
quickly recognizing structured data
End of explanation
"""
cnames = ['sepal_length','sepal_width','petal_length','petal_width','class']
irisdata.columns = cnames
irisdata
"""
Explanation: Quick metadata manipulation
End of explanation
"""
irisdata[irisdata['petal_width']==irisdata.petal_width.max()]
"""
Explanation: Quick filtering
End of explanation
"""
irisdata.iloc[::30,:2]
"""
Explanation: Quick slicing
End of explanation
"""
print irisdata['class'].value_counts()
for x in xrange(4):
s = irisdata.iloc[:,x]
print '{0:<12}'.format(s.name.upper()), " Statistics: ", \
'{0:>5} {1:>5} {2:>5} {3:>5}'.format(s.max(), s.min(), round(s.mean(),2),round(s.std(),2))
"""
Explanation: Quick statistics
End of explanation
"""
slogs = lambda x:sp.log(x)*x
entpy = lambda x:sp.exp((slogs(x.sum())-x.map(slogs).sum())/x.sum())
irisdata.groupby('class').agg(entpy)
"""
Explanation: Quick "MapReduce"
End of explanation
"""
Series1 = pd.Series(np.random.randn(4))
print Series1,type(Series1)
print Series1.index
print Series1.values
"""
Explanation: 1. Welcome to the World of Pandas
The important Pandas data types
DataFrame (two-dimensional table)
Series (one-dimensional sequence)
Index (row index, row-level metadata)
1.1 Series: the spear of pandas (a column or row of a data table, an observation vector, a one-dimensional array...)
In the data world, a complete observation of any single individual, or the observation of one attribute across a group of individuals, can all be abstracted as a Series.
Building a Series from values:
It consists of a default index plus the values.
End of explanation
"""
print Series1>0
print Series1[Series1>0]
"""
Explanation: Series supports filtering on the same principle as NumPy:
End of explanation
"""
print Series1*2
print Series1+5
"""
Explanation: Broadcasting is of course supported too:
End of explanation
"""
print np.exp(Series1)
#NumPy Universal Function
f_np = np.frompyfunc(lambda x:np.exp(x*2+5),1,1)
print f_np(Series1)
"""
Explanation: As well as universal functions:
End of explanation
"""
Series2 = pd.Series(Series1.values,index=['norm_'+unicode(i) for i in xrange(4)])
print Series2,type(Series2)
print Series2.index
print type(Series2.index)
print Series2.values
"""
Explanation: Using row labels on the series itself, instead of building a two-column table, makes it easy to tell which part is data and which part is metadata:
End of explanation
"""
print Series2[['norm_0','norm_3']]
print 'norm_0' in Series2
print 'norm_6' in Series2
"""
Explanation: Although the rows are ordered, the data can still be accessed through the row-level index:
(It is not quite an OrderedDict either, because index values can even repeat; "not recommended" does not mean "not allowed".)
End of explanation
"""
print Series1.index
"""
Explanation: The default row index works just like row numbers:
End of explanation
"""
Series3_Dict = {"Japan":"Tokyo","S.Korea":"Seoul","China":"Beijing"}
Series3_pdSeries = pd.Series(Series3_Dict)
print Series3_pdSeries
print Series3_pdSeries.values
print Series3_pdSeries.index
"""
Explanation: Defining a Series from a dict (or an OrderedDict) with unique keys means there is no need to worry about duplicate index labels:
End of explanation
"""
Series4_IndexList = ["Japan","China","Singapore","S.Korea"]
Series4_pdSeries = pd.Series( Series3_Dict ,index = Series4_IndexList)
print Series4_pdSeries
print Series4_pdSeries.values
print Series4_pdSeries.index
print Series4_pdSeries.isnull()
print Series4_pdSeries.notnull()
"""
Explanation: Difference from a dict #1: it is ordered
End of explanation
"""
Series5_IndexList = ['A','B','B','C']
Series5 = pd.Series(Series1.values,index = Series5_IndexList)
print Series5
print Series5[['B','A']]
"""
Explanation: Difference from a dict #2: index values may repeat, although this is not recommended.
End of explanation
"""
print Series4_pdSeries.name
print Series4_pdSeries.index.name
Series4_pdSeries.name = "Capital Series"
Series4_pdSeries.index.name = "Nation"
print Series4_pdSeries
pd.DataFrame(Series4_pdSeries)
"""
Explanation: Series-level metadata: name
Once the data series and its index both have names, later data joins become much more convenient!
End of explanation
"""
dataNumPy = np.asarray([('Japan','Tokyo',4000),\
('S.Korea','Seoul',1300),('China','Beijing',9100)])
DF1 = pd.DataFrame(dataNumPy,columns=['nation','capital','GDP'])
DF1
"""
Explanation: 1.2 DataFrame: the warhammer of pandas (data table, two-dimensional array)
An ordered collection of Series, as convenient as R's data.frame.
Think about it: the vast majority of data can be represented as a DataFrame.
Defined from a NumPy 2-D array, from a file, or from a database: the data may be fine, but never forget the column names
End of explanation
"""
dataDict = {'nation':['Japan','S.Korea','China'],\
'capital':['Tokyo','Seoul','Beijing'],'GDP':[4900,1300,9100]}
DF2 = pd.DataFrame(dataDict)
DF2
"""
Explanation: Columns of equal length stored in a dictionary (JSON): unfortunately, dictionary keys are unordered
End of explanation
"""
DF21 = pd.DataFrame(DF2,columns=['nation','capital','GDP'])
DF21
DF22 = pd.DataFrame(DF2,columns=['nation','capital','GDP'],index = [2,0,1])
DF22
"""
Explanation: Defining a DataFrame from another DataFrame: ah, my inner perfectionist kicks in!
End of explanation
"""
print DF22.nation
print DF22.capital
print DF22['GDP']
"""
Explanation: Extracting a column from a DataFrame? Two ways (exactly like JavaScript!)
The '.' notation easily clashes with other reserved keywords
The '[ ]' notation is the safest.
End of explanation
"""
print DF22[0:1] # what you get back is actually a DataFrame
print DF22.ix[0] # returns the row for the corresponding index label
"""
Explanation: Extracting a row from a DataFrame? (At least) two ways:
End of explanation
"""
print DF22.iloc[0,:]
print DF22.iloc[:,0]
"""
Explanation: The ultimate move, NumPy-style slicing: iloc
End of explanation
"""
DF22['population'] = [1600,130,55]
DF22['region'] = 'East_Asian'
DF22
"""
Explanation: Heard you came from ALTER TABLE hell? The panda smiles.
Note, however, that adding a column dynamically cannot be done with the '.' notation, only with '[ ]'
End of explanation
"""
index_names = ['a','b','c']
Series_for_Index = pd.Series(index_names)
print pd.Index(index_names)
print pd.Index(Series_for_Index)
"""
Explanation: 1.3 Index: the wild card of pandas data manipulation (row-level index)
The row-level index is
metadata
possibly derived from real data, and can therefore also be treated as data
possibly a multi-level index, i.e. a combination of several columns
exchangeable with column names, and stackable/unstackable to achieve Excel-pivot-table effects
Index comes in four... no, many flavors; some important index types include
pd.Index (plain)
Int64Index (numeric index)
MultiIndex (multi-level index, described in more detail under data manipulation)
DatetimeIndex (datetime-formatted index)
PeriodIndex (period-based datetime index)
Defining a plain index directly: it looks just like an ordinary Series
End of explanation
"""
index_names = ['a','b','c']
index0 = pd.Index(index_names)
print index0.get_values()
index0[2] = 'd'
"""
Explanation: Unfortunately it is immutable, remember that!
End of explanation
"""
#print [('Row_'+str(x+1),'Col_'+str(y+1)) for x in xrange(4) for y in xrange(4)]
multi1 = pd.Index([('Row_'+str(x+1),'Col_'+str(y+1)) for x in xrange(4) for y in xrange(4)])
multi1.names = ['index1','index2'] # .names labels the levels of a MultiIndex
print multi1
"""
Explanation: Throw in a list of tuples and you get a MultiIndex
Unfortunately, if this list comprehension is turned into parentheses (a generator), it no longer works.
End of explanation
"""
data_for_multi1 = pd.Series(xrange(0,16),index=multi1)
data_for_multi1
data_for_multi1.unstack()
data_for_multi1.unstack().stack()
"""
Explanation: For a Series, once it has a MultiIndex the data can shapeshift!
The code shows that:
a Series with a two-level MultiIndex can be unstack()ed into a DataFrame
a DataFrame can be stack()ed into a Series with a two-level MultiIndex
End of explanation
"""
multi2 = pd.Index([('Row_'+str(x),'Col_'+str(y+1)) \
for x in xrange(5) for y in xrange(x)])
multi2
data_for_multi2 = pd.Series(np.arange(10),index = multi2)
data_for_multi2
data_for_multi2.unstack()
data_for_multi2.unstack().stack()
"""
Explanation: Let's look at an example of unbalanced data:
Row_1,2,3,4 and Col_1,2,3,4 are not fully crossed.
End of explanation
"""
dates = [datetime.datetime(2015,1,1),datetime.datetime(2015,1,8),datetime.datetime(2015,1,30)]
pd.DatetimeIndex(dates)
"""
Explanation: The datetime standard library is so handy, you deserve it
End of explanation
"""
periodindex1 = pd.period_range('2015-01','2015-04',freq='M')
print periodindex1
"""
Explanation: If you need not only a unified time format but also a unified time frequency
End of explanation
"""
print periodindex1.asfreq('D',how='start')
print periodindex1.asfreq('D',how='end')
"""
Explanation: How do we convert between monthly and daily resolution?
Some companies represent a month by its 1st day and others by its last day, which makes conversion a pain; asfreq handles it
End of explanation
"""
periodindex_mon = pd.period_range('2015-01','2015-03',freq='M').asfreq('D',how='start')
periodindex_day = pd.period_range('2015-01-01','2015-03-31',freq='D')
print periodindex_mon
print periodindex_day
"""
Explanation: Last of all, what if I really want to line up the time resolution of the two frequencies?
End of explanation
"""
#print pd.Series(periodindex_mon,index=periodindex_mon).reindex(periodindex_day)
full_ts = pd.Series(periodindex_mon,index=periodindex_mon).reindex(periodindex_day)
full_ts
full_ts = pd.Series(periodindex_mon,index=periodindex_mon).reindex(periodindex_day,method='ffill')
full_ts
"""
Explanation: Coarse-grained data + reindex + ffill/bfill
End of explanation
"""
index1 = pd.Index(['A','B','B','C','C'])
index2 = pd.Index(['C','D','E','E','F'])
index3 = pd.Index(['B','C','A'])
print index1.append(index2)
print index1.difference(index2)
print index1.intersection(index2)
print index1.union(index2) # Support unique-value Index well
print index1.isin(index2)
print index1.delete(2)
print index1.insert(0,'K') # Not suggested
print index3.drop('A') # Support unique-value Index well
print index1.is_monotonic,index2.is_monotonic,index3.is_monotonic
print index1.is_unique,index2.is_unique,index3.is_unique
"""
Explanation: What convenient operations are there on indexes?
As described earlier, an index is ordered and may contain duplicates, yet to some extent it can still be accessed by key; in other words, certain set operations are supported.
End of explanation
"""
print cnames
irisdata = pd.read_csv('S1EP3_Iris.txt',header = None, names = cnames,\
encoding='utf-8')
irisdata[::30]
"""
Explanation: 2. Coming and Going Freely in the Panda World: Pandas I/O
The same old story: at the basic level, we still care about how pandas exchanges data with the outside world.
2.1 Structured data input and output
read_csv and to_csv are a matching pair of I/O tools; read_csv directly returns a pandas.DataFrame, while to_csv writes a file as soon as the command is executed
read_table: similar functionality
read_fwf: handles fixed-width files
read_excel and to_excel interact conveniently with Excel
Remember the example at the very beginning?
header indicates whether column names are present in the data; if they are on row 0, write 0, and the corresponding rows are skipped when reading starts; if absent, write None
names gives the column names to be used as the final column names
encoding is the character encoding of the dataset; as a rule, data is kept in utf-8 to make file transfer easy
Question: in the example below, with header=4 and names=cnames, exactly what data would be read?
End of explanation
"""
irisdata.to_excel('S1EP3_irisdata.xls',index = None,encoding='utf-8')
irisdata_from_excel = pd.read_excel('S1EP3_irisdata.xls',header=0, encoding='utf-8')
irisdata_from_excel[::30]
"""
Explanation: For the full list of parameters, please see the API:
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html#pandas.read_csv
Some commonly used parameters are introduced here:
Reading control:
skiprows: skip a given number of rows
nrows: read only a given number of rows
skipfooter: a fixed number of rows at the end are never read
skip_blank_lines: skip blank lines
Content handling:
sep/delimiter: the separator matters; common ones are the comma, the space, and the Tab ('\t')
na_values: specify which values should be treated as NA
thousands: when parsing numeric types, the thousands separator is not standardized (1.234.567,89 and 1,234,567.89 are both possible), so converting strings to numbers requires specifying it
Finishing touches:
index_col: use an actual column (its position, or even its name) as the index
squeeze: when only one column is read, store it as a pandas.Series rather than a pandas.DataFrame
2.1.x Excel ... ?
For Excel files holding extremely regular data, there is really no need to insist on Excel as the storage format, although Pandas also provides a very friendly I/O interface.
End of explanation
"""
json_data = [{'name':'Wang','sal':50000,'job':'VP'},\
{'name':'Zhang','job':'Manager','report':'VP'},\
{'name':'Li','sal':5000,'report':'Manager'}]
data_employee = pd.read_json(json.dumps(json_data))
data_employee_ri = data_employee.reindex(columns=['name','job','sal','report'])
data_employee_ri
"""
Explanation: The only important parameter is sheetname=k, which means the k-th sheet of the Excel file will be pulled out (counting from 0).
2.2 Semi-structured data
JSON: a data format commonly used for network transmission.
Look carefully: this is exactly the style of the heterogeneous data we usually collect:
column names do not match completely
the join key may not be unique
metadata is stored inside the data itself
End of explanation
"""
IP = '127.0.0.1'
us = 'root'
pw = '123456'
"""
Explanation: 2.3 Database connection workflow (Optional)
Use one of the following packages to build a Connection from your database configuration:
pymysql
pyODBC
cx_Oracle
Then operate on the database through pandas.read_sql_query, read_sql_table and to_sql.
There are many ways for Python to talk to a database; from a data analyst's point of view the pandas approach is a good fit, and later lectures will combine it with SQL syntax.
To connect to a database you first need a set of information like this:
End of explanation
"""
import pymysql
import pymysql.cursors
connection = pymysql.connect(host=IP,\
user=us,\
password=pw,\
charset='utf8mb4',\
cursorclass=pymysql.cursors.DictCursor)
#pd.read_sql_query("sql",connection)
#df.to_sql('tablename',connection,flavor='mysql')
"""
Explanation: For example, if the database is MySQL:
End of explanation
"""
pd.DataFrame([np.random.rand(2),np.random.rand(2),np.random.rand(2)],columns=['C1','C2'])
"""
Explanation: 3. Digging deeper into data manipulation with Pandas
Building on Part 1, there are more ways to manipulate data:
select data by column name or row index, and combine ix and iloc to flexibly grab a subset of the data (already covered in Part 1)
concatenate by record (like UNION ALL) or join
conveniently map custom functions
sorting
handling missing values
pivot tables as flexible as Excel's (covered in more detail in Part 4)
3.1 Combining data: convenient and flexible
3.1.1 Horizontal concatenation: directly with DataFrame
End of explanation
"""
pd.concat([data_employee_ri,data_employee_ri,data_employee_ri])
pd.concat([data_employee_ri,data_employee_ri,data_employee_ri],ignore_index=True)
"""
Explanation: 3.1.2 Horizontal concatenation: Concatenate
End of explanation
"""
pd.merge(data_employee_ri,data_employee_ri,on='name')
pd.merge(data_employee_ri,data_employee_ri,on=['name','job'])
"""
Explanation: 3.1.3 Vertical joining: Merge
Join on data columns using the on keyword
One column or several columns can be specified
left_on and right_on can also be used
End of explanation
"""
data_employee_ri.index.name = 'index1'
pd.merge(data_employee_ri,data_employee_ri,\
left_index=True,right_index=True)
"""
Explanation: To join on the index, use left_index and right_index directly
End of explanation
"""
DF31xA = pd.DataFrame({'name':[u'老王',u'老张',u'老李'],'sal':[5000,3000,1000]})
DF31xA
DF31xB = pd.DataFrame({'name':[u'老王',u'老刘'],'job':['VP','Manager']})
DF31xB
"""
Explanation: TIPS: add the how keyword and specify one of
* how = 'inner'
* how = 'left'
* how = 'right'
* how = 'outer'
Combined with how, merge basically reproduces the functionality SQL is supposed to provide, while keeping the code clean.
End of explanation
"""
pd.merge(DF31xA,DF31xB,on='name',how='left')
"""
Explanation: how='left': keep the information from the left table
End of explanation
"""
pd.merge(DF31xA,DF31xB,on='name',how='right')
"""
Explanation: how='right': keep the information from the right table
End of explanation
"""
pd.merge(DF31xA,DF31xB,on='name',how='inner')
"""
Explanation: how='inner': keep only the intersection of the two tables, which avoids missing values as far as possible
End of explanation
"""
pd.merge(DF31xA,DF31xB,on='name',how='outer')
"""
Explanation: how='outer': keep the union of the two tables, which introduces missing values but integrates all available information as fully as possible
End of explanation
"""
dataNumPy32 = np.asarray([('Japan','Tokyo',4000),('S.Korea','Seoul',1300),('China','Beijing',9100)])
DF32 = pd.DataFrame(dataNumPy32,columns=['nation','capital','GDP'])
DF32
"""
Explanation: 3.2 The three musketeers of data cleaning
The next three functions, map, applymap and apply, are something almost every data analyst has to go through in the data-cleaning step.
They answer the following questions:
I want to build a new column based on one existing column; what do I do? (Series -> Series)
I want to build a new table based on the whole table; what do I do? (DataFrame -> DataFrame)
I want to build a new column based on many columns; what do I do? (DataFrame -> Series)
Stop writing for loops! Change the way you think and improve both coding and execution efficiency.
End of explanation
"""
def GDP_Factorize(v):
fv = np.float64(v)
if fv > 6000.0:
return 'High'
elif fv < 2000.0:
return 'Low'
else:
return 'Medium'
DF32['GDP_Level'] = DF32['GDP'].map(GDP_Factorize)
DF32['NATION'] = DF32.nation.map(str.upper)
DF32
"""
Explanation: map: map one column of data with the same rule, i.e. process every element with the same function
End of explanation
"""
DF32.applymap(lambda x: float(x)*2 if x.isdigit() else x.upper())
"""
Explanation: A similar facility is applymap, which applies a map-like operation globally to every single element of a DataFrame
End of explanation
"""
DF32.apply(lambda x:x['nation']+x['capital']+'_'+x['GDP'],axis=1)
"""
Explanation: apply, in turn, can operate on a DataFrame and produce a Series.
It is a bit like the agg introduced later, but apply can work row-wise or column-wise, controlled simply with axis.
End of explanation
"""
dataNumPy33 = np.asarray([('Japan','Tokyo',4000),('S.Korea','Seoul',1300),('China','Beijing',9100)])
DF33 = pd.DataFrame(dataNumPy33,columns=['nation','capital','GDP'])
DF33
DF33.sort(['capital','nation'])
DF33.sort('GDP',ascending=False)
DF33.sort('GDP').sort(ascending=False)
DF33.sort_index(axis=1,ascending=True)
"""
Explanation: 3.3 Sorting data
sort: sort rows by the values of one or more columns
sort_index: sort by the values in the index, and the axis argument decides whether rows or columns are reordered
End of explanation
"""
DF33
DF33.rank()
DF33.rank(ascending=False)
"""
Explanation: A handy feature: Rank
End of explanation
"""
DF33x = pd.DataFrame({'name':[u'老王',u'老张',u'老李',u'老刘'],'sal':np.array([5000,3000,5000,9000])})
DF33x
"""
Explanation: Note how tied data (equal values) are handled:
* method = 'average'
* method = 'min'
* method = 'max'
* method = 'first'
End of explanation
"""
DF33x.sal.rank()
"""
Explanation: DF33x.rank() uses method='average' by default: when two values are equal, both receive the average rank
End of explanation
"""
DF33x.sal.rank(method='min')
"""
Explanation: method='min': tied values all receive the smallest rank
End of explanation
"""
DF33x.sal.rank(method='max')
"""
Explanation: method='max': tied values all receive the largest rank
End of explanation
"""
DF33x.sal.rank(method='first')
"""
Explanation: method='first': whichever tied value appears first receives the smaller rank.
End of explanation
"""
DF34 = data_for_multi2.unstack()
DF34
"""
Explanation: 3.4 Handling missing data
End of explanation
"""
DF34.mean(skipna=True)
DF34.mean(skipna=False)
"""
Explanation: Ignoring missing values:
End of explanation
"""
DF34
DF34.fillna(0).mean(axis=1,skipna=False)
"""
Explanation: If you do not want to ignore missing values, it is time to bring out fillna:
End of explanation
"""
from IPython.display import Image
Image(filename="S1EP3_group.png")
"""
Explanation: 4. A "group" of pandas: Pandas groupby
groupby works much like SQL's GROUP BY keyword:
Split-Apply-Combine
Split: partition the data into groups according to a rule
Apply: use some agg function that takes a pd.Series as input and returns a single value
Combine: collect the results
The flexibility of pandas groupby:
the grouping key can come from the index or from actual column data
the grouping rule can be based on one column or several
End of explanation
"""
irisdata_group = irisdata.groupby('class')
irisdata_group
for level,subsetDF in irisdata_group:
print level
print subsetDF[::20]
"""
Explanation: The concrete logic of grouping
End of explanation
"""
irisdata.groupby('class').agg(\
lambda x:((x-x.mean())**3).sum()*(len(x)-0.0)/\
(len(x)-1.0)/(len(x)-2.0)/(x.std()*np.sqrt((len(x)-0.0)/(len(x)-1.0)))**3 if len(x)>2 else None)
irisdata.groupby('class').agg(spstat.skew)
"""
Explanation: Grouping lets you implement MapReduce-style logic quickly
Map: specify the column label to group by, and rows with different values are dispatched to different groups for processing
Reduce: take several values as input and return one value, usually via agg, which accepts a function
End of explanation
"""
pd.concat([irisdata,irisdata.groupby('class').transform('mean')],axis=1)[::20]
"""
Explanation: Broadcasting after aggregation
On OLAP databases, the window operation sum() OVER (PARTITION BY) was introduced to avoid the two-step groupby + join.
In pandas, this kind of operation can be taken over by transform.
End of explanation
"""
factor1 = np.random.randint(0,3,50)
factor2 = np.random.randint(0,2,50)
factor3 = np.random.randint(0,3,50)
values = np.random.randn(50)
hierindexDF = pd.DataFrame({'F1':factor1,'F2':factor2,'F3':factor3,'F4':values})
hierindexDF
hierindexDF_gbsum = hierindexDF.groupby(['F1','F2','F3']).sum()
hierindexDF_gbsum
"""
Explanation: Pivot-table operations once a MultiIndex (multi-column grouping) has been produced
Generally speaking, one side effect of a multi-column groupby is that after .groupby().agg() the row index has become a hierarchical index over the grouping columns.
If we want the effect of an Excel pivot table, where row and column indexes can be swapped freely to get the statistics we need, what should we do?
End of explanation
"""
hierindexDF_gbsum.index
"""
Explanation: Inspect the Index:
End of explanation
"""
hierindexDF_gbsum.unstack()
hierindexDF_gbsum.unstack(0)
hierindexDF_gbsum.unstack(1)
hierindexDF_gbsum.unstack([2,0])
"""
Explanation: unstack:
with no argument, it moves the innermost index level onto the columns
with a numeric argument, it moves the index level at that position onto the columns
with a list argument, it moves the index levels at those positions onto the columns in turn
End of explanation
"""
hierindexDF_gbsum.unstack([2,0]).stack([1,2])
"""
Explanation: Going one step further, stack is the counterpart of unstack: it moves the multi-level index on the columns back onto the row index
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/thu/cmip6/models/sandbox-3/atmoschem.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'thu', 'sandbox-3', 'atmoschem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era: CMIP6
Institute: THU
Source ID: SANDBOX-3
Topic: Atmoschem
Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry.
Properties: 84 (39 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:40
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmospheric chemistry model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmospheric chemistry model code.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Chemistry Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Form of prognostic variables in the atmospheric chemistry component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of advected tracers in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry calculations (not advection) generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.8. Coupling With Chemical Reactivity
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry transport scheme turbulence is couple with chemical reactivity?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the evolution of a given variable
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemical species advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Split Operator Chemistry Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemistry (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.5. Split Operator Alternate Order
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.6. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the atmospheric chemistry model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.7. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Timestep Framework --> Split Operator Order
**
4.1. Turbulence
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.2. Convection
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Precipitation
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.4. Emissions
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.5. Deposition
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.6. Gas Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.9. Photo Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.10. Aerosols
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the atmospheric chemistry grid
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
*Does the atmospheric chemistry grid match the atmosphere grid?*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 7.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview of transport implementation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Use Atmospheric Transport
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is transport handled by the atmosphere, rather than within atmospheric chemistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Transport Details
Is Required: FALSE Type: STRING Cardinality: 0.1
If transport is handled within the atmospheric chemistry scheme, describe it.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric chemistry emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Emissions Concentrations --> Surface Emissions
**
10.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via any other method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Gas Phase Chemistry
Atmospheric chemistry transport
13.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview gas phase atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Species included in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.3. Number Of Bimolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of bi-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.4. Number Of Termolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of ter-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.7. Number Of Advected Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of advected species in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.8. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.9. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.10. Wet Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.11. Wet Oxidation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry startospheric heterogeneous chemistry
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview stratospheric heterogenous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
"""
Explanation: 14.2. Gas Phase Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Gas phase species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
"""
Explanation: 14.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.5. Sedimentation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sedimentation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview tropospheric heterogenous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Gas Phase Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of gas phase species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
"""
Explanation: 15.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the tropospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric photo chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 16.2. Number Of Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the photo-chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Photolysis scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.2. Environmental Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.)
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.22/_downloads/17295dea468fcedae97ed8a6f9afc520/plot_decoding_csp_timefreq.ipynb | bsd-3-clause | # Authors: Laura Gwilliams <[email protected]>
# Jean-Remi King <[email protected]>
# Alex Barachant <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne import Epochs, create_info, events_from_annotations
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
from mne.time_frequency import AverageTFR
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder
"""
Explanation: Decoding in time-frequency space using Common Spatial Patterns (CSP)
The time-frequency decomposition is estimated by iterating over raw data that
has been band-passed at different frequencies. This is used to compute a
covariance matrix over each epoch or a rolling time-window and extract the CSP
filtered signals. A linear discriminant classifier is then applied to these
signals.
End of explanation
"""
event_id = dict(hands=2, feet=3) # motor imagery: hands vs feet
subject = 1
runs = [6, 10, 14]
raw_fnames = eegbci.load_data(subject, runs)
raw = concatenate_raws([read_raw_edf(f) for f in raw_fnames])
# Extract information from the raw file
sfreq = raw.info['sfreq']
events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))
raw.pick_types(meg=False, eeg=True, stim=False, eog=False, exclude='bads')
raw.load_data()
# Assemble the classifier using scikit-learn pipeline
clf = make_pipeline(CSP(n_components=4, reg=None, log=True, norm_trace=False),
LinearDiscriminantAnalysis())
n_splits = 5 # how many folds to use for cross-validation
cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
# Classification & time-frequency parameters
tmin, tmax = -.200, 2.000
n_cycles = 10. # how many complete cycles: used to define window size
min_freq = 5.
max_freq = 25.
n_freqs = 8 # how many frequency bins to use
# Assemble list of frequency range tuples
freqs = np.linspace(min_freq, max_freq, n_freqs) # assemble frequencies
freq_ranges = list(zip(freqs[:-1], freqs[1:])) # make freqs list of tuples
# Infer window spacing from the max freq and number of cycles to avoid gaps
window_spacing = (n_cycles / np.max(freqs) / 2.)
centered_w_times = np.arange(tmin, tmax, window_spacing)[1:]
n_windows = len(centered_w_times)
# Instantiate label encoder
le = LabelEncoder()
"""
Explanation: Set parameters and read data
End of explanation
"""
# init scores
freq_scores = np.zeros((n_freqs - 1,))
# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):
# Infer window size based on the frequency being used
w_size = n_cycles / ((fmax + fmin) / 2.) # in seconds
# Apply band-pass filter to isolate the specified frequencies
raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin',
skip_by_annotation='edge')
# Extract epochs from filtered data, padded by window size
epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
proj=False, baseline=None, preload=True)
epochs.drop_bad()
y = le.fit_transform(epochs.events[:, 2])
X = epochs.get_data()
# Save mean scores over folds for each frequency and time window
freq_scores[freq] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
scoring='roc_auc', cv=cv,
n_jobs=1), axis=0)
"""
Explanation: Loop through frequencies, apply classifier and save scores
End of explanation
"""
plt.bar(freqs[:-1], freq_scores, width=np.diff(freqs)[0],
align='edge', edgecolor='black')
plt.xticks(freqs)
plt.ylim([0, 1])
plt.axhline(len(epochs['feet']) / len(epochs), color='k', linestyle='--',
label='chance level')
plt.legend()
plt.xlabel('Frequency (Hz)')
plt.ylabel('Decoding Scores')
plt.title('Frequency Decoding Scores')
"""
Explanation: Plot frequency results
End of explanation
"""
# init scores
tf_scores = np.zeros((n_freqs - 1, n_windows))
# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):
# Infer window size based on the frequency being used
w_size = n_cycles / ((fmax + fmin) / 2.) # in seconds
# Apply band-pass filter to isolate the specified frequencies
raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin',
skip_by_annotation='edge')
# Extract epochs from filtered data, padded by window size
epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
proj=False, baseline=None, preload=True)
epochs.drop_bad()
y = le.fit_transform(epochs.events[:, 2])
# Roll covariance, csp and lda over time
for t, w_time in enumerate(centered_w_times):
# Center the min and max of the window
w_tmin = w_time - w_size / 2.
w_tmax = w_time + w_size / 2.
# Crop data into time-window of interest
X = epochs.copy().crop(w_tmin, w_tmax).get_data()
# Save mean scores over folds for each frequency and time window
tf_scores[freq, t] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
scoring='roc_auc', cv=cv,
n_jobs=1), axis=0)
"""
Explanation: Loop through frequencies and time, apply classifier and save scores
End of explanation
"""
# Set up time frequency object
av_tfr = AverageTFR(create_info(['freq'], sfreq), tf_scores[np.newaxis, :],
centered_w_times, freqs[1:], 1)
chance = np.mean(y) # set chance level to white in the plot
av_tfr.plot([0], vmin=chance, title="Time-Frequency Decoding Scores",
cmap=plt.cm.Reds)
"""
Explanation: Plot time-frequency results
End of explanation
"""
|
ODZ-UJF-AV-CR/osciloskop | vxi.ipynb | gpl-3.0 | import matplotlib.pyplot as plt
import sys
import os
import time
import h5py
import numpy as np
import glob
import vxi11
# Step 0:
# Connect oscilloscope via direct Ethernet link
# Step 1:
# Run "Record" on the oscilloscope
# and wait for 508 frames to be acquired.
# Step 2:
# Run this cell to initialize grabbing.
# This will need a rewrite
class TmcDriver:
def __init__(self, device):
print("Initializing connection to: " + device)
self.device = device
self.instr = vxi11.Instrument(device)
def write(self, command):
self.instr.write(command);
def read(self, length = 500):
return self.instr.read(length)
def read_raw(self, length = 500):
return self.instr.read_raw(length)
def getName(self):
self.write("*IDN?")
return self.read(300)
def ask(self, command):
return self.instr.ask(command)
def sendReset(self):
self.write("*RST") # Be carefull, this real resets an oscilloscope
# Default oscilloscope record timeout [s]
loop_sleep_time = 60
# For Ethernet
#osc = TmcDriver("TCPIP::147.231.24.72::INSTR")
osc = TmcDriver("TCPIP::10.1.1.254::INSTR")
print(osc.ask("*IDN?"))
"""
Explanation: Oscilloscope utility – using Ethernet
End of explanation
"""
filename = 1
if (filename == 1):
for f in glob.iglob("./data/*.h5"): # delete all .h5 files
print 'Deleting', f
os.remove(f)
else:
print 'Not removing old files, as filename {0} is not 1.'.format(filename)
osc.write(':STOP') # start recording
time.sleep(0.5)
while True:
#print(' Enter to continue.')
#raw_input() Wait for key press
osc.write(':FUNC:WREC:OPER REC') # start recording
run_start_time = time.time()
print ' Capturing...'
time.sleep(0.5)
while True:
osc.write(':FUNC:WREC:OPER?') # finish recording?
reply = osc.read()
if reply == 'STOP':
run_time = round(time.time() - run_start_time, 2)
print(' Subrun finished, capturing for %.2f seconds.' % run_time)
break
time.sleep(0.01)
osc.write(':WAV:SOUR CHAN1')
osc.write(':WAV:MODE NORM')
osc.write(':WAV:FORM BYTE')
osc.write(':WAV:POIN 1400')
osc.write(':WAV:XINC?')
xinc = float(osc.read(100))
print 'XINC:', xinc,
osc.write(':WAV:YINC?')
yinc = float(osc.read(100))
print 'YINC:', yinc,
osc.write(':TRIGger:EDGe:LEVel?')
trig = float(osc.read(100))
print 'TRIG:', trig,
osc.write(':WAVeform:YORigin?')
yorig = float(osc.read(100))
print 'YORIGIN:', yorig,
osc.write(':WAVeform:XORigin?')
xorig = float(osc.read(100))
print 'XORIGIN:', xorig,
osc.write(':FUNC:WREP:FEND?') # get number of last frame
frames = int(osc.read(100))
print 'FRAMES:', frames, 'SUBRUN', filename
with h5py.File('./data/data'+'{:02.0f}'.format(filename)+'_'+str(int(round(time.time(),0)))+'.h5', 'w') as hf:
hf.create_dataset('FRAMES', data=(frames)) # write number of frames
hf.create_dataset('XINC', data=(xinc)) # write axis parameters
hf.create_dataset('YINC', data=(yinc))
hf.create_dataset('TRIG', data=(trig))
hf.create_dataset('YORIGIN', data=(yorig))
hf.create_dataset('XORIGIN', data=(xorig))
hf.create_dataset('CAPTURING', data=(run_time))
osc.write(':FUNC:WREP:FCUR 1') # skip to n-th frame
time.sleep(0.5)
for n in range(1,frames+1):
osc.write(':FUNC:WREP:FCUR ' + str(n)) # skip to n-th frame
time.sleep(0.001)
osc.write(':WAV:DATA?') # read data
#time.sleep(0.4)
wave1 = bytearray(osc.read_raw(500))
wave2 = bytearray(osc.read_raw(500))
wave3 = bytearray(osc.read_raw(500))
#wave4 = bytearray(osc.read(500))
#wave = np.concatenate((wave1[11:],wave2[:(500-489)],wave3[:(700-489)]))
wave = np.concatenate((wave1[11:],wave2,wave3[:-1]))
hf.create_dataset(str(n), data=wave)
filename = filename + 1
"""
Explanation: Read repeatedly records from oscilloscope
End of explanation
"""
filename = 1
run_start_time = time.time()
if (filename == 1):
for f in glob.iglob("./data/*.h5"): # delete all .h5 files
print 'Deleting', f
os.remove(f)
else:
print 'Not removing old files, as filename {0} is not 1.'.format(filename)
while True:
osc.write(':WAV:SOUR CHAN1')
osc.write(':WAV:MODE NORM')
osc.write(':WAV:FORM BYTE')
osc.write(':WAV:POIN 1400')
osc.write(':WAV:XINC?')
xinc = float(osc.read(100))
print 'XINC:', xinc,
osc.write(':WAV:YINC?')
yinc = float(osc.read(100))
print 'YINC:', yinc,
osc.write(':TRIGger:EDGe:LEVel?')
trig = float(osc.read(100))
print 'TRIG:', trig,
osc.write(':WAVeform:YORigin?')
yorig = float(osc.read(100))
print 'YORIGIN:', yorig,
osc.write(':WAVeform:XORigin?')
xorig = float(osc.read(100))
print 'XORIGIN:', xorig,
osc.write(':FUNC:WREP:FEND?') # get number of last frame
frames = int(osc.read(100))
print 'FRAMES:', frames, 'SUBRUN', filename
# This is not good if the scaling is different and frames are for example just 254
# if (frames < 508):
# loop_sleep_time += 10
with h5py.File('./data/data'+'{:02.0f}'.format(filename)+'.h5', 'w') as hf:
hf.create_dataset('FRAMES', data=(frames)) # write number of frames
hf.create_dataset('XINC', data=(xinc)) # write axis parameters
hf.create_dataset('YINC', data=(yinc))
hf.create_dataset('TRIG', data=(trig))
hf.create_dataset('YORIGIN', data=(yorig))
hf.create_dataset('XORIGIN', data=(xorig))
osc.write(':FUNC:WREP:FCUR 1') # skip to n-th frame
time.sleep(0.5)
for n in range(1,frames+1):
osc.write(':FUNC:WREP:FCUR ' + str(n)) # skip to n-th frame
time.sleep(0.001)
osc.write(':WAV:DATA?') # read data
#time.sleep(0.4)
wave1 = bytearray(osc.read_raw(500))
wave2 = bytearray(osc.read_raw(500))
wave3 = bytearray(osc.read_raw(500))
#wave4 = bytearray(osc.read(500))
#wave = np.concatenate((wave1[11:],wave2[:(500-489)],wave3[:(700-489)]))
wave = np.concatenate((wave1[11:],wave2,wave3[:-1]))
hf.create_dataset(str(n), data=wave)
filename = filename + 1
osc.write(':FUNC:WREC:OPER REC') # start recording
#print(' Subrun finished, sleeping for %.0f seconds.' % loop_sleep_time)
run_start_time = time.time()
#time.sleep(loop_sleep_time) # delay for capturing
print(' Subrun finished, Enter to continue.')
#raw_input()
time.sleep(100) # delay for capturing
#print(' We were waiting for ', time.time() - run_start_time())
"""
Explanation: Repeatedly read records from the oscilloscope
This should be run after the initialization step. The timeout at the end should be increased if not all 508 frames are transferred.
End of explanation
"""
first_run_start_time = time.time()
raw_input()
loop_sleep_time = time.time() - first_run_start_time + 15
print loop_sleep_time
loop_sleep_time=60
"""
Explanation: Stopwatch for timing the first loop
End of explanation
"""
|
SheffieldML/GPclust | notebooks/MOHGP_demo.ipynb | gpl-3.0 | %matplotlib inline
%config InlineBackend.figure_format = 'png'#'svg' would be better, but eats memory for these big plots.
from matplotlib import pyplot as plt
import numpy as np
import GPy
import sys
sys.path.append('/home/james/work/gpclust/')
import GPclust
"""
Explanation: Mixtures of Gaussian processes with GPclust
This notebook accompanies the paper
Nonparametric Clustering of Structured Time Series
James Hensman, Magnus Rattray and Neil D. Lawrence
IEEE TPAMI 2014
The code is available at https://github.com/jameshensman/gpclust . The GPclust module depends on GPy.
The hierarchical Gaussian process model was fleshed out in
Hierarchical Bayesian modelling of gene expression time series
across irregularly sampled replicates and clusters
James Hensman, Neil D. Lawrence and Magnus Rattray
http://www.biomedcentral.com/1471-2105/14/252
A simple implementation of hierarchical GPs is available as part of GPy. You may also be interested in the related notebook on hierarchical GPs.
End of explanation
"""
#generate a data set. Here's the sinusoid demo from the manuscript.
Nclust = 10
Nx = 12
Nobs = [np.random.randint(20,31) for i in range(Nclust)] #a random number of realisations in each cluster
X = np.random.rand(Nx,1)
X.sort(0)
#random frequency and phase for each cluster
base_freqs = 2*np.pi + 0.3*(np.random.rand(Nclust)-.5)
base_phases = 2*np.pi*np.random.rand(Nclust)
means = np.vstack([np.tile(np.sin(f*X+p).T,(Ni,1)) for f,p,Ni in zip(base_freqs,base_phases,Nobs)])
#add a lower frequency sinusoid for the noise
freqs = .4*np.pi + 0.01*(np.random.rand(means.shape[0])-.5)
phases = 2*np.pi*np.random.rand(means.shape[0])
offsets = 0.3*np.vstack([np.sin(f*X+p).T for f,p in zip(freqs,phases)])
Y = means + offsets + np.random.randn(*means.shape)*0.05
"""
Explanation: A simple sinusoid dataset
Here's a simulated dataset that contains the simple features that we expect to have in real data sets: smooth processes (here, sinusoids) corrupted by further smooth processes (here, more sinusoids) as well as noise.
End of explanation
"""
#plotting.
x_plot, xmin, xmax = GPy.plotting.matplot_dep.base_plots.x_frame1D(X)
plt.figure(figsize=(18,6))
index_starts = np.hstack([0, np.cumsum(Nobs[:-1])])
index_stops = np.cumsum(Nobs)
for n in range(Nclust):
    plt.subplot(2, Nclust//2, n+1)
plt.plot(X, Y[index_starts[n]:index_stops[n]].T, 'b', marker='x',ms=4, mew=1, linewidth=0.2)
plt.plot(x_plot, np.sin(base_freqs[n]*x_plot+base_phases[n]), 'r', linewidth=2)
GPy.plotting.matplot_dep.base_plots.align_subplots(2, Nclust//2, xlim=(xmin, xmax))
"""
Explanation: In the plot below, we show the underlying function for each cluster as a smooth red function, and the data associated with the cluster as thinly connected blue crosses.
End of explanation
"""
k_underlying = GPy.kern.RBF(input_dim=1, variance=0.1, lengthscale=0.1)
k_corruption = GPy.kern.RBF(input_dim=1, variance=0.01, lengthscale=0.1) + GPy.kern.White(1, variance=0.001)
m = GPclust.MOHGP(X, k_underlying, k_corruption, Y, K=10, prior_Z='DP', alpha=1.0)
m.optimize()
m.systematic_splits(verbose=False)
"""
Explanation: Constructing and optimizing a model
Now that we have generated a data set, it's straightforward to build and optimize a clustering model. First, we need to build two GPy kernels (covariance functions), which will be used to model the underlying function and the replication noise, respectively. We'll take a wild stab at the parameters of these covariances, and let the model optimize them for us later.
The two kernels model the underlying function of the cluster, and the deviations of each gene from that underlying function. If we believe that the only corruption of the data from the cluster mean is i.i.d. noise, we can specify a GPy.kern.White covariance. In practice, it's helpful to allow correlated noise. The model of any cluster of genes then has a hierarchical structure, with the unknown cluster-specific mean drawn from a GP, and then each gene in that cluster being drawn from a GP with said unknown mean function.
To optimize the model with the default optimization settings, we call m.optimize(). To invoke the recommended merge-split procedure, call m.systematic_splits(). Note that during the splitting procedure, many calls are made to the optimize function.
End of explanation
"""
plt.figure(figsize=(14,9))
m.plot(on_subplots=True, colour=True, newfig=False)
"""
Explanation: Plotting and examining the posterior
The model has quite extensive plotting built in, with various options for colour, display of the data as points or connected lines, etc. Here we find that the model manages to separate all but two of the true clusters. The number of 'genes' found in each cluster is labeled in the corner of each plot.
End of explanation
"""
#exactly as above, but with a white-noise kernel for the structure.
k_underlying = GPy.kern.RBF(input_dim=1, variance=0.1, lengthscale=0.1)
k_corruption = GPy.kern.White(1, variance=0.1)
m = GPclust.MOHGP(X, k_underlying, k_corruption, Y, K=10, prior_Z='DP', alpha=1.0)
m.optimize()
m.systematic_splits(verbose=False)
plt.figure(figsize=(14,9))
m.plot(on_subplots=True, colour=True, newfig=False)
"""
Explanation: Structure is important
Why do we have to specify two kernels in GPclust? The first kernel describes the properties of the functions which underly each cluster. The second describes the properties of the functions which describe how each time-course (gene) deviates from the cluster.
This structure is important: if we model the deviation of each time-course from the cluster as simply noise, it's more difficult to infer the correct clusters. Such a model can be constructed in GPclust by using a white (noise) kernel for the structure, as follows.
End of explanation
"""
|
garth-wells/notebooks-3D7 | 01-ElasticBarLinearFEM.ipynb | mit | import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
# Use seaborn to style the plots and use accessible colors
import seaborn as sns
sns.set()
sns.set_palette("colorblind")
"""
Explanation: Finite element solver for an elastic rod
We create in this notebook a simple finite element solver for a linear elastic rod using continuous, piecewise linear finite elements.
We will use NumPy to perform the simulations, and Matplotlib to visualise the results, so we first import the NumPy and Matplotlib modules:
End of explanation
"""
E = 100.0
A = 1.0
"""
Explanation: We will also use ipywidgets (interactive widgets), so you will need to make sure that it is installed (ipywidgets is installed on the Azure notebook service).
A first solver
Elastic parameters
For our elastic rod, we define the Young's modulus $E$ and the cross-sectional area $A$. Both are assumed constant.
End of explanation
"""
def distributed_load(x):
return 1.0
"""
Explanation: Distributed load
We now define the distributed load $f$. We will use a function that takes the coordinate $x$ as an argument so we can define loading terms that vary with position.
End of explanation
"""
L = 10.0
n_cells = 30
n_nodes = n_cells + 1
"""
Explanation: Create a mesh
We will create a mesh of length $L$ with a prescribed number of cells (elements) $n_{\text{cells}}$. For linear elements, the number of nodes $n_{\text{nodes}}$ is equal to $n_{\text{cells}} + 1$.
End of explanation
"""
mesh = np.linspace(0.0, L, n_nodes)
"""
Explanation: To create a mesh from 0 to $L$ with equal size cells (elements) we will use the NumPy function linspace to generate an array of equally spaced points on the interval $[0, L]$.
End of explanation
"""
plt.xlabel('$x$')
plt.title('finite element mesh')
plt.plot(mesh, [0]*len(mesh), 'o-');
"""
Explanation: Matplotlib can be used to visualise the mesh:
End of explanation
"""
l = L/n_cells
k_e = (E*A/l)*np.array([[1, -1], [-1, 1]])
print(k_e)
"""
Explanation: A simple finite element solver
We have already defined our domain (the mesh) and the constitutive parameters ($E$ and $A$). We now need to build the global stiffness matrix $\boldsymbol{K}$ and the global right-hand side vector $\boldsymbol{b}$, after which we can solve $\boldsymbol{K} \boldsymbol{a} = \boldsymbol{b}$ to get the nodal degrees-of-freedom $\boldsymbol{a}$.
Create stiffness matrix $\boldsymbol{K}$
We create the global stiffness matrix by computing the element matrix $\boldsymbol{k}_{e}$ (which is constant since $A$, $E$ and the cell size are constant in our case), and then looping over all cells and adding their contribution to the global matrix.
Element stiffness matrix $\boldsymbol{k}_{e}$
The element stiffness matrix for a linear element of length $l$ and constant $AE$ is
$$
\boldsymbol{k}_{e}
= \frac{EA}{l}
\begin{bmatrix}
1 & -1 \\ -1 & 1
\end{bmatrix}
$$
Our mesh has constant cell size, so we can compute $\boldsymbol{k}_{e}$ just once:
End of explanation
"""
K = np.zeros((n_nodes, n_nodes))
"""
Explanation: Assemble global stiffness matrix
To build the global stiffness matrix $\boldsymbol{K}$, we first create an empty $n_{\text{nodes}} \times n_{\text{nodes}}$ matrix:
End of explanation
"""
for element in range(n_cells):
K[element:element + 2, element:element + 2] += k_e
"""
Explanation: Next, we loop over each cell and add the cell contribution $\boldsymbol{k}_{e}$ to the the global matrix $\boldsymbol{K}$. This is known as assembly.
End of explanation
"""
b = np.zeros(n_nodes)
"""
Explanation: Create RHS vector $\boldsymbol{b}$
We create the global RHS vector $\boldsymbol{b}$ by computing the cell RHS $\boldsymbol{b}_{e}$ cell-by-cell, and adding this to the global RHS vector. We allow the distributed load $f$ to vary with position, which is why we cannot compute it just once. For simplicity we will integrate the local RHS using the midpoint rule. This is exact if $f$ is constant, and is otherwise approximate.
We first create an empty global RHS vector:
End of explanation
"""
for element in range(n_cells):
# Get cell length and midpoint
l = mesh[element + 1] - mesh[element]
x_mid = (mesh[element + 1] + mesh[element])/2.0
# Evaluate loading term
f = distributed_load(x_mid)
# Compute and add RHS contributions
b[element:element + 2] += 0.5*l*f
"""
Explanation: We now loop over each cell and compute $\int_{x_{i}}^{x_{i+1}} N_{1} f dx$ and $\int_{x_{i}}^{x_{i+1}} N_{2} f dx$ for each cell, and add the contribution to the global RHS vector:
End of explanation
"""
# Zero first row and first column
K[0, :] = 0.0
K[:, 0] = 0.0
# Place one on the diagonal of K and zero in the first entry on the RHS
K[0, 0] = 1.0
b[0] = 0.0
"""
Explanation: Apply Dirichlet (displacement) boundary condition
We're almost ready to solve a finite element problem, but we would get into trouble if we tried to solve $\boldsymbol{K} \boldsymbol{a} = \boldsymbol{b}$ using the above stiffness matrix because it is singular (you can verify this by computing the determinant with np.linalg.det(K)). The system is singular because we have not applied a Dirichlet boundary condition, hence there is a rigid body translation mode in the system.
We impose the boundary condition $u = 0$ at $x=0$ by zeroing the first row and column of the matrix, placing a one on the first diagonal entry and setting the first entry on the RHS to zero. It should be clear algebraically that this will ensure that the first degree of freedom is equal to zero when we solve the system.
End of explanation
"""
u = np.linalg.solve(K, b)
"""
Explanation: Solve system of equations
We can now solve the finite element system $\boldsymbol{K} \boldsymbol{a} = \boldsymbol{b}$:
End of explanation
"""
plt.xlabel('$x$')
plt.ylabel('$u$')
plt.title('Finite element solution for the elastic bar')
plt.plot(mesh, u, 'o-');
"""
Explanation: Visualising the solution
We now plot the solution:
End of explanation
"""
def solver(L, f, n_cells, quad_degree=3):
"A simple finite element solver for a 1D bar"
# Crarte mesh and compute cell size
n_nodes = n_cells + 1
mesh = np.linspace(0.0, L, n_nodes)
l = L/n_cells
    # Compute local stiffness matrix
k_e = (E*A/l)*np.array([[1, -1], [-1, 1]])
    # Assemble global stiffness matrix
K = np.zeros((n_nodes, n_nodes))
for element in range(n_cells):
K[element:element + 2, element:element + 2] += k_e
# Use NumPy to get quadrature points and weights
quad_points, quad_weights = np.polynomial.legendre.leggauss(quad_degree)
# Assemble RHS using Gauss quadrature
b = np.zeros(n_nodes)
for element in range(n_cells):
# Get cell midpoint
x_mid = (mesh[element + 1] + mesh[element])/2.0
# Loop over quadrature points
for zeta, weight in zip(quad_points, quad_weights):
# Compute coordinate of point
x = x_mid + zeta*l/2.0
# Evaluate loading term
f_load = f(x)
# Quadrature weight
w = weight*(l/2.0)
# Compute RHS contributions
N = 0.5 - zeta/2.0
b[element] += w*N*f_load
N = 0.5 + zeta/2.0
b[element + 1] += w*N*f_load
# Apply boundary condition
K[0, :], K[:, 0], K[0, 0] = 0.0, 0.0, 1.0
b[0] = 0.0
return np.linalg.solve(K, b), mesh
"""
Explanation: A more programmatic approach
We now present a finite element solver that is very similar to the one above, but now provides a programming interface so we can reuse it to explore different loading functions and different levels of mesh refinement.
End of explanation
"""
def f_sine(x):
return np.sin(x)
"""
Explanation: We want to see how the solution changes with mesh refinement for some loading function. To set $f = \sin(x)$, we create a function:
End of explanation
"""
meshes = [3, 5, 10, 20]
solutions = [solver(L, f_sine, n) for n in meshes]
"""
Explanation: We now compute solutions for four increasingly fine meshes and store the mesh and the computed displacement field. We pass the domain length (L), the function for computing the loading (f_sine) and the number of cells in the mesh (n):
End of explanation
"""
plt.xlabel('$x$')
plt.ylabel('$u$')
plt.title('Finite element solution for the elastic bar')
for u, mesh in solutions:
plt.plot(mesh, u, 'o-', label=str(len(mesh)-1) + ' cells');
plt.legend(loc='upper left');
"""
Explanation: Plotting the solutions on the same graph:
End of explanation
"""
from ipywidgets import widgets
from ipywidgets import interact
# Compute reference solution with 100 cells
u_ref, mesh_ref = solver(L, f_sine, 100)
@interact(num_cells=widgets.IntSlider(min=1, max=40, value=5, description='number of cells'))
def plot(num_cells=5):
plt.xlabel('$x$')
plt.ylabel('$u$')
plt.title('Finite element solution for the elastic bar')
u, mesh = solver(L, f_sine, num_cells, quad_degree=6)
plt.plot(mesh_ref, u_ref, '--', color='k', label='reference solution');
plt.plot(mesh, u, 'o-', label=str(len(mesh)-1) + ' cells');
plt.legend(loc='upper left');
"""
Explanation: We can see that the solutions get closer as the mesh is refined.
Exercise: Experiment with your own loading function, and compare the computed results to an analytical solution (a sketch for the constant-load case is included at the end of this notebook).
Interactive solver
We can make an interactive solver, where you can change the number of cells via a slider and see how the solution changes. We will use a high-order quadrature scheme to keep the integration error small on the coarse meshes.
You need to run this notebook in a Jupyter session to see and use the slider.
End of explanation
"""
|
lyndond/Analyzing_Neural_Time_Series | chapter09.ipynb | mit | import numpy as np
import scipy.io
from matplotlib import pyplot as plt
"""
Explanation: 9. Overview of time-domain EEG analyses
End of explanation
"""
data = scipy.io.loadmat('sampleEEGdata')
#get all the data we need from the eeg file. Working with .mat files like this is not ideal, as you can clearly see below.
#A better way to access this data would be to re-save the sampleEEGdata.mat file as v-7.3 in matlab, or convert it to hdf5,
#then open it in python using h5py or pytables. Since I'd rather not mess with the batteries-included-ness of this book,
#I'll keep the data as-is and extract what we'll need.
eeg_data = data["EEG"][0,0]["data"]
eeg_pts = data["EEG"][0,0]["pnts"][0,0] #number of points in EEG data
eeg_times = data["EEG"][0,0]["times"][0]
eeg_rate = float(data["EEG"][0,0]["srate"][0]) #make float for division purposes later
eeg_trials = data["EEG"][0,0]["trials"][0,0]
eeg_epoch=data["EEG"][0,0]["epoch"][0]
which_channel_to_plot = 'FCz' #specify label of channel to plot
eeg_chan_locs_labels=data["EEG"][0,0]["chanlocs"][0]["labels"]
channel_index = (eeg_chan_locs_labels == which_channel_to_plot) #specify index (channel number) of label
x_axis_limit = (-200, 1000) #in milliseconds
num_trials2plot = 12
plt.figure(figsize=(10, 6))
# pick random trials using random.choice (from numpy.random)
random_trial_to_plot = np.random.choice(np.arange(eeg_trials), num_trials2plot)
# figure out how many subplots we need
n_rows = np.ceil(num_trials2plot/np.ceil(np.sqrt(num_trials2plot))).astype(int)
n_cols = np.ceil(np.sqrt(num_trials2plot)).astype(int)
fig, ax = plt.subplots(n_rows, n_cols, sharex='all')
for ii in range(num_trials2plot):
idx = np.unravel_index(ii, (n_rows, n_cols))
#plot trial and specify x-axis and title
ax[idx].plot(eeg_times, np.squeeze(eeg_data[channel_index,:,random_trial_to_plot[ii] - 1]))
ax[idx].set(title=f"Trial {random_trial_to_plot[ii]}", yticks=[])
fig.tight_layout()
"""
Explanation: Figure 9.1a
End of explanation
"""
#plot all trials
plt.plot(eeg_times,np.squeeze(eeg_data[channel_index,:,:]),'y')
#plot the event-related potential (ERP), i.e. the average time-domain signal
plt.plot(eeg_times,np.squeeze(np.mean(eeg_data[channel_index,:,:],axis=2)),'k',linewidth=2)
_=plt.title("All EEG traces, and their average")
#now plot only the ERP
plt.plot(eeg_times,np.squeeze(np.mean(eeg_data[channel_index,:,:],axis=2))) #axis=2 specifies which axis to compute the mean along
plt.vlines(0,-10,10,linestyles='dashed')
plt.hlines(0,-1000,1500)
plt.axis([-300,1000,-10,10])
plt.xlabel("Time from stimlulus onset (ms)")
plt.ylabel(r'$ \mu V $') #latex interpreter looks for dollar signs
plt.title("ERP (average of " + str(eeg_trials) + " trials) from electrode " + eeg_chan_locs_labels[channel_index][0][0])
plt.gca().invert_yaxis() #EEG convention to flip y axis
"""
Explanation: Figure 9.1b
End of explanation
"""
import scipy.signal as sig
chan2plot = "P7"
channel_index = eeg_chan_locs_labels == chan2plot #specify index (channel number) of label
erp = np.squeeze(np.mean(eeg_data[channel_index,:,:],axis=2))
nyquist = eeg_rate/2.
transition_width = 0.15
#low-pass filter data
#we'll look at filtering in detail in chapter 14
#filter from 0-40 Hz
filter_high = 40 #Hz; high cut off
b, a = sig.butter(5, np.array([filter_high*(1+transition_width)])/nyquist,btype="lowpass")
erp_0to40 = sig.filtfilt(b, a, erp, padlen=150) #use filtfilt (filters forwards and backwards to eliminate phase shift)
#next, filter from 0-10
filter_high = 10 #Hz
b, a = sig.butter(5, np.array([filter_high*(1+transition_width)])/nyquist,btype="lowpass")
erp_0to10 = sig.filtfilt(b, a, erp, padlen=150)
#next, filter from 5-15
filter_low = 5 # Hz
filter_high = 15 # Hz
b, a = sig.butter(5, np.array([filter_low*(1-transition_width), filter_high*(1+transition_width)])/nyquist,btype="bandpass")
erp_5to15 = sig.filtfilt(b, a, erp, padlen=150)
plt.figure()
plt.plot(eeg_times,erp,'k')
plt.plot(eeg_times,erp_0to40,'c')
plt.plot(eeg_times,erp_0to10,'r')
plt.plot(eeg_times,erp_5to15,'m')
plt.xlim([-200,1200])
plt.gca().invert_yaxis()
plt.xlabel("time (ms)")
plt.ylabel("voltage " + r"$(\mu V)$")
plt.title("Raw and filtered signal")
_=plt.legend(['raw','0-40 Hz','0-10Hz','5-15Hz'])
"""
Explanation: Figure 9.2
To my knowledge, Python (specifically, scipy) does not have a function that is completely analogous to MATLAB's firls(). A very close approximation that I will use instead is an n-th order Butterworth bandpass filter.
TODO
End of explanation
"""
plt.figure()
plt.subplot(211)
plt.plot(eeg_times,np.squeeze(eeg_data.mean(axis=0)))
plt.xlim([-200, 1000])
plt.gca().invert_yaxis() #flip for EEG conventions
plt.title("ERP from all sensors")
#topographical variance plot
plt.subplot(212)
plt.plot(eeg_times,np.squeeze(eeg_data.mean(axis=0).var(axis=1)))
plt.xlim([-200,1000])
plt.xlabel("Time (ms)")
plt.ylabel("var "+r'$ (\mu V) $')
plt.title("Topographical variance")
plt.tight_layout()
"""
Explanation: Figure 9.3
End of explanation
"""
use_rts = True #or false
#get RTs from each trial to use for sorting trials. In this experiment,
#the RT was always the first event after the stimulus (the time=0 event).
#Normally, you should build in exceptions in case there was no response or
#another event occurred between the stimulus and response. This was already
#done for the current dataset.
rts = np.zeros(len(eeg_epoch))
for ei in range(len(eeg_epoch)):
#first, find the index at which time = 0 event occurs
time0event = eeg_epoch[ei]["eventlatency"][0] == 0 #bool array of where time=0 occurs
time0event = np.where(time0event == time0event.max())[0][0] # find the index of the True value in this array
rts[ei] = eeg_epoch[ei]["eventlatency"][0][time0event+1]
if use_rts:
rts_idx=np.argsort(rts)
else:
rts_idx = np.argsort(np.squeeze(eeg_data[46,333,:]))
#plot the trials for one channel, in (un)sorted order
plt.imshow(np.squeeze(eeg_data[46,:,rts_idx]),
extent=[eeg_times[0], eeg_times[-1], 1, eeg_trials],
aspect="auto",
cmap=plt.get_cmap("jet"),
origin="lower",
interpolation="none")
plt.xlabel("time from stim onset (ms)")
plt.ylabel("trial number")
plt.clim([-30,30])
plt.colorbar(label=r"$\mu V$")
plt.axis([-200,1200,1,99])
plt.grid(False)
if use_rts:
rtplot=plt.plot(rts[rts_idx],np.arange(1,eeg_trials+1),'k',linewidth=3, label= "Reaction time")
plt.legend(bbox_to_anchor=[1.5,1]) #put the legend outside of the image
"""
Explanation: Figures 9.4-9.5 use the function topoplot from MATLAB toolbox EEGlab
TODO
Figure 9.6
End of explanation
"""
|
maartenbreddels/vaex | docs/source/tutorial_jupyter.ipynb | mit | import vaex
import vaex.jupyter.model as vjm
import numpy as np
import matplotlib.pyplot as plt
df = vaex.example()
df
"""
Explanation: <div class="alert alert-info">
**Warning:** This notebook needs a running kernel to be fully interactive, please run it locally or on [mybinder](https://mybinder.org/v2/gh/vaexio/vaex/master?filepath=docs%2Fsource%2Ftutorial_jupyter.ipynb).
</div>
Jupyter integration: interactivity
Vaex can process about 1 billion rows per second, and in combination with the Jupyter notebook, this allows for interactive exploration of large datasets.
Introduction
The vaex-jupyter package contains the building blocks to interactively define an N-dimensional grid, which is then used for visualizations.
We start by defining the building blocks (vaex.jupyter.model.Axis, vaex.jupyter.model.DataArray and vaex.jupyter.view.DataArray) used to define and visualize our N-dimensional grid.
Let us first import the relevant packages, and open the example DataFrame:
End of explanation
"""
E_axis = vjm.Axis(df=df, expression=df.E, shape=140)
Lz_axis = vjm.Axis(df=df, expression=df.Lz, shape=100)
Lz_axis
"""
Explanation: We want to build a 2 dimensional grid with the number counts in each bin. To do this, we first define two axis objects:
End of explanation
"""
await vaex.jupyter.gather() # wait until Vaex is done with all background computation
Lz_axis # now min and max are computed, and bin_centers is set
"""
Explanation: When we inspect the Lz_axis object we see that the min, max, and bin centers are all None. This is because Vaex calculates them in the background, so the kernel stays interactive, meaning you can continue working in the notebook. We can ask Vaex to wait until all background calculations are done. Note that for billions of rows, this can take over a second.
End of explanation
"""
data_array_widget = df.widget.data_array(axes=[Lz_axis, E_axis], selection=[None, 'default'])
data_array_widget # being the last expression in the cell, Jupyter will 'display' the widget
"""
Explanation: Note that the Axis is a traitlets HasTrait object, similar to all ipywidget objects. This means that we can link all of its properties to an ipywidget and thus creating interactivity. We can also use observe to listen to any changes to our model.
An interactive xarray DataArray display
Now that we have defined our two axes, we can create a vaex.jupyter.model.DataArray (model) together with a vaex.jupyter.view.DataArray (view).
A convenient way to do this, is to use the widget accessor data_array method, which creates both, links them together and will return a view for us.
The returned view is an ipywidget object, which becomes a visual element in the Jupyter notebook when displayed.
End of explanation
"""
# NOTE: since the computations are done in the background, data_array_widget.model.grid is initially None.
# We can ask vaex-jupyter to wait till all executions are done using:
await vaex.jupyter.gather()
# get a reference to the xarray DataArray object
data_array = data_array_widget.model.grid
print(f"type:", type(data_array))
print("dims:", data_array.dims)
print("data:", data_array.data)
print("coords:", data_array.coords)
print("Lz's data:", data_array.coords['Lz'].data)
print("Lz's attrs:", data_array.coords['Lz'].attrs)
print("And displaying the xarray DataArray:")
display(data_array) # this is what the vaex.jupyter.view.DataArray uses
"""
Explanation: Note: If you see this notebook on readthedocs, you will see the selection coordinate already has [None, 'default'], because cells below have already been executed and have updated this widget. If you run this notebook yourself (say on mybinder), you will see after executing the above cell, the selection will have [None] as its only value.
From the specification of the axes and the selections, Vaex computes a 3d histogram, the first dimension being the selections. Internally this is simply a numpy array, but we wrap it in an xarray DataArray object. An xarray DataArray object can be seen as a labeled Nd array, i.e. a numpy array with extra metadata to make it fully self-describing.
Notice that in the above code cell, we specified the selection argument with a list containing two elements, in this case None and 'default'. The None selection simply shows all the data, while the default refers to any selection made without explicitly naming it. Even though the latter has not been defined at this point, we can still pre-emptively include it, in case we want to modify it later.
The most important properties of the data_array are printed out below:
End of explanation
"""
df.select(df.x > 0)
"""
Explanation: Note that data_array.coords['Lz'].data is the same as Lz_axis.bin_centers and data_array.coords['Lz'].attrs contains the same min/max as the Lz_axis.
Also, we see that displaying the xarray.DataArray object (data_array_widget.model.grid) gives us the same output as the data_array_widget above. There is a big difference however. If we change a selection:
End of explanation
"""
# NOTE: da is short for 'data array'
def plot2d(da):
plt.figure(figsize=(8, 8))
    ar = da.data[1] # take the numpy data and take the 'default' selection (index 1)
print(f'imshow of a numpy array of shape: {ar.shape}')
plt.imshow(np.log1p(ar.T), origin='lower')
df.widget.data_array(axes=[Lz_axis, E_axis], display_function=plot2d, selection=[None, True])
"""
Explanation: and scroll back, we see that the data_array_widget has updated itself and now contains two selections! This is a very powerful feature that allows us to make interactive visualizations.
Interactive plots
To make interactive plots we can pass a custom display_function to the data_array_widget. This will override the default notebook behaviour which is a call to display(data_array_widget). In the following example we create a function that displays a matplotlib figure:
End of explanation
"""
df.select(df.id < 10)
"""
Explanation: In the above figure, we choose index 1 along the selection axis, which refers to the 'default' selection. Choosing an index of 0 would correspond to the None selection, and all the data would be displayed. If we now change the selection, the figure will update itself:
End of explanation
"""
def plot2d_with_labels(da):
plt.figure(figsize=(8, 8))
grid = da.data # take the numpy data
dim_x = da.dims[0]
dim_y = da.dims[1]
plt.title(f'{dim_y} vs {dim_x} - shape: {grid.shape}')
extent = [
da.coords[dim_x].attrs['min'], da.coords[dim_x].attrs['max'],
da.coords[dim_y].attrs['min'], da.coords[dim_y].attrs['max']
]
plt.imshow(np.log1p(grid.T), origin='lower', extent=extent, aspect='auto')
plt.xlabel(da.dims[0])
plt.ylabel(da.dims[1])
da_plot_view_nicer = df.widget.data_array(axes=[Lz_axis, E_axis], display_function=plot2d_with_labels)
da_plot_view_nicer
"""
Explanation: As xarray's DataArray is fully self describing, we can improve the plot by using the dimension names for labeling, and setting the extent of the figure's axes.
Note that we don't need any information from the Axis objects created above, and in fact, we should not use them, since they may not be in sync with the xarray DataArray object. Later on, we will create a widget that will edit the Axis' expression.
Our improved visualization with proper axes and labeling:
End of explanation
"""
def plot2d_with_selections(da):
grid = da.data
# Create 1 row and #selections of columns of matplotlib axes
fig, axgrid = plt.subplots(1, grid.shape[0], sharey=True, squeeze=False)
for selection_index, ax in enumerate(axgrid[0]):
ax.imshow(np.log1p(grid[selection_index].T), origin='lower')
df.widget.data_array(axes=[Lz_axis, E_axis], display_function=plot2d_with_selections,
selection=[None, 'default', 'rest'])
"""
Explanation: We can also create more sophisticated plots, for example one where we show all of the selections. Note that we can pre-emptively expect a selection and define it later:
End of explanation
"""
df.select(df.id < 10) # select 10 objects
df.select(df.id >= 10, name='rest') # and the rest
"""
Explanation: Modifying a selection will update the figure.
End of explanation
"""
FeH_axis = vjm.Axis(df=df, expression='FeH', min=-3, max=1, shape=5)
da_view = df.widget.data_array(axes=[E_axis, Lz_axis, FeH_axis], selection=[None, 'default'])
da_view
"""
Explanation: Another advantage of using xarray is its excellent plotting capabilities. It handles a lot of the boring stuff like axis labeling, and also provides a nice interface for slicing the data even more.
Let us introduce another axis, FeH (fun fact: FeH is a property of stars that tells us how much iron relative to hydrogen is contained in them, an idicator of their origin):
End of explanation
"""
def plot_with_xarray(da):
da_log = np.log1p(da) # Note that an xarray DataArray is like a numpy array
da_log.plot(x='Lz', y='E', col='FeH', row='selection', cmap='viridis')
plot_view = df.widget.data_array([E_axis, Lz_axis, FeH_axis], display_function=plot_with_xarray,
selection=[None, 'default', 'rest'])
plot_view
"""
Explanation: We can see that we now have a 4 dimensional grid, which we would like to visualize.
And xarray's plot make our life much easier:
End of explanation
"""
selection_widget = df.widget.selection_expression()
selection_widget
"""
Explanation: We only have to tell xarray which axis it should map to which 'aesthetic', speaking in Grammar of Graphics terms.
Selection widgets
Although we can change the selection in the notebook (e.g. df.select(df.id > 20)), if we create a dashboard (using Voila) we cannot execute arbitrary code. Vaex-jupyter also comes with many widgets, and one of them is a selection_expression widget:
End of explanation
"""
await vaex.jupyter.gather()
w = df.widget.counter_selection('default', lazy=True)
w
"""
Explanation: The counter_selection creates a widget which keeps track of the number of rows in a selection. In this case we ask it to be 'lazy', which means that it will not cause extra passes over the data, but will ride along if some user action triggers a calculation.
End of explanation
"""
x_axis = vjm.Axis(df=df, expression=df.Lz)
y_axis = vjm.Axis(df=df, expression=df.E)
da_xy_view = df.widget.data_array(axes=[x_axis, y_axis], display_function=plot2d_with_labels, shape=180)
da_xy_view
"""
Explanation: Axis control widgets
Let us create new axis objects using the same expressions as before, but give them more general names (x_axis and y_axis), because we want to change the expressions interactively.
End of explanation
"""
# wait for the previous plot to finish
await vaex.jupyter.gather()
# Change both the x and y axis
x_axis.expression = np.log(df.x**2)
y_axis.expression = df.y
# Note that both assignment will create 1 computation in the background (minimal amount of passes over the data)
await vaex.jupyter.gather()
# vaex computed the new min/max, and the xarray DataArray
# x_axis.min, x_axis.max, da_xy_view.model.grid
"""
Explanation: Again, we can change the expressions of the axes programmatically:
End of explanation
"""
x_widget = df.widget.expression(x_axis.expression, label='X axis')
x_widget
"""
Explanation: But, if we want to create a dashboard with Voila, we need to have a widget that controls them:
End of explanation
"""
from ipywidgets import link
link((x_widget, 'value'), (x_axis, 'expression'))
"""
Explanation: This widget will allow us to edit an expression, which will be validated by Vaex. How do we 'link' the value of the widget to the axis expression? Because both the Axis as well as the x_widget are HasTrait objects, we can link their traits together:
End of explanation
"""
y_widget = df.widget.expression(y_axis, label='X axis')
# vaex now does this for us, much shorter
# link((y_widget, 'value'), (y_axis, 'expression'))
y_widget
await vaex.jupyter.gather() # lets wait again till all calculations are finished
"""
Explanation: Since this operation is so common, we can also directly pass the Axis object, and Vaex will set up the linking for us:
End of explanation
"""
from vaex.jupyter.widgets import ContainerCard
ContainerCard(title='My plot',
subtitle="using vaex-jupyter",
main=da_xy_view,
controls=[x_widget, y_widget], show_controls=True)
"""
Explanation: A nice container
If you are familiar with the ipyvuetify components, you can combine them to create very pretty widgets. Vaex-jupyter comes with a nice container:
End of explanation
"""
y_axis.expression = df.vx
"""
Explanation: We can directly assign a Vaex expression to the x_axis.expression, or to x_widget.value since they are linked.
End of explanation
"""
df = vaex.example() # we create the dataframe again, to leave all the plots above 'alone'
heatmap_xy = df.widget.heatmap(df.x, df.y, selection=[None, True])
heatmap_xy
"""
Explanation: Interactive plots
So far we have been using interactive widgets to control the axes in the view. The figure itself however was not interactive, and we could not have panned or zoomed for example.
Vaex has a few builtin visualizations, most notably a heatmap and histogram using bqplot:
End of explanation
"""
heatmap_xy.model.x
"""
Explanation: Note that we passed expressions, and not axis objects. Vaex recognizes this and will create the axis objects for you. You can access them from the model:
End of explanation
"""
x_widget = df.widget.expression(heatmap_xy.model.x, label='X axis')
y_widget = df.widget.expression(heatmap_xy.model.y, label='X axis')
ContainerCard(title='My plot',
subtitle="using vaex-jupyter and bqplot",
main=heatmap_xy,
controls=[x_widget, y_widget, selection_widget],
show_controls=True,
card_props={'style': 'min-width: 800px;'})
"""
Explanation: The heatmap itself is again a widget. Thus we can combine it with other widgets to create a more sophisticated interface.
End of explanation
"""
heatmap_xy.tool = 'pan-zoom' # we can also do this programmatically.
"""
Explanation: By switching the tool in the toolbar (click <i aria-hidden="true" class="v-icon notranslate material-icons theme--light">pan_tool</i>, or changing it programmmatically in the next cell), we can zoom in. The plot's axis bounds are directly synched to the axis object (the x_min is linked to the x_axis min, etc). Thus a zoom action causes the axis objects to be changed, which will trigger a recomputation.
End of explanation
"""
heatmap_xy.model.x.expression = np.log10(df.x**2)
await vaex.jupyter.gather() # and we wait before we continue
"""
Explanation: Since we can access the Axis objects, we can also programmatically change the heatmap. Note that both the expression widget, the plot axis label and the heatmap it self is updated. Everything is linked together!
End of explanation
"""
histogram_Lz = df.widget.histogram(df.Lz, selection_interact='default')
histogram_Lz.tool = 'select-x'
histogram_Lz
# You can graphically select a particular region, in this case we do it programmatically
# for reproducability of this notebook
histogram_Lz.plot.figure.interaction.selected = [1200, 1300]
"""
Explanation: Another visualization based on bqplot is the interactive histogram. In the example below, we show all the data, but the selection interaction will affect/set the 'default' selection.
End of explanation
"""
|
robertoalotufo/ia898 | src/rgb2hsv.ipynb | mit | def rgb2hsv(rgb_img):
import numpy as np
r = rgb_img[:,:,0].ravel()
g = rgb_img[:,:,1].ravel()
b = rgb_img[:,:,2].ravel()
hsv_map = map(rgb2hsvmap, r, g, b)
hsv_img = np.array(list(hsv_map)).reshape(rgb_img.shape)
return hsv_img
def rgb2hsvmap(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
# HSV: Hue, Saturation, Value
# H: position in the spectrum
# S: color saturation ("purity")
# V: color brightness
v = maxc
if minc == maxc:
return 0.0, 0.0, v
s = (maxc-minc) / maxc
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc:
h = bc-gc
elif g == maxc:
h = 2.0+rc-bc
else:
h = 4.0+gc-rc
h = (h/6.0) % 1.0
    #hue is returned normalized to [0, 1), i.e. the angle on the colour circle scaled from 0-360 degrees;
    #saturation and value range from 0.0 to 1.0, the minimum and maximum possible values
return h, s, v
#adapted from python source code (Lib/colorsys.py - rgb_to_hsv(r, g, b))
"""
Explanation: Function rgb2hsv
Synopsis
Convert an RGB image to HSV.
g = rgb2hsv(rgb_img)
g: output HSV image.
rgb_img: input RGB image.
End of explanation
"""
testing = (__name__ == "__main__")
if testing:
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import matplotlib.colors as mcolor
rgb_img = mpimg.imread('/etc/jupyterhub/ia898_1s2017/ia898/data/boat.tif')
print('rgb_img.shape: ' , rgb_img.shape)
plt.figure(1)
plt.title('RGB')
plt.imshow(rgb_img)
r = rgb_img[:,:,0].ravel()
g = rgb_img[:,:,1].ravel()
b = rgb_img[:,:,2].ravel()
print('r: ', r)
print('g: ',g)
print('b: ',b)
hsv_img = rgb2hsv(rgb_img)
print('hsv_img.shape: ' , hsv_img.shape)
h = hsv_img[:,:,0].ravel()
s = hsv_img[:,:,1].ravel()
v = hsv_img[:,:,2].ravel()
plt.figure(2)
plt.title('rgb2hsv')
plt.imshow(hsv_img)
mcolor_hsv = mcolor.rgb_to_hsv(rgb_img)
plt.figure(3)
plt.title('mcolor.rgb_to_hsv')
plt.imshow(mcolor_hsv)
print('h: ', h)
print('s: ',s)
print('v: ',v)
"""
Explanation: Description
Returns an image in the HSV color model: the H (hue), S (saturation) and V (value) channels computed from an RGB image. The HSV model is a cylindrical-coordinate representation of the points of the RGB color space.
Examples
Example 1
End of explanation
"""
if testing:
import sys,os
ia898path = os.path.abspath('/etc/jupyterhub/ia898_1s2017/')
if ia898path not in sys.path:
sys.path.append(ia898path)
import ia898.src as ia
pixels = np.array([
[[243., 114., 25.],
[111., 19., 115.],
[43., 188., 69.]],
[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]]
])
print("Shape: ", pixels.shape)
hsv_img = rgb2hsv(pixels)
fig = plt.figure(figsize=(10, 10))
fig.add_subplot(1, 3, 1)
plt.imshow(pixels)
plt.title('Original image RGB')
fig.add_subplot(1, 3, 2)
plt.imshow(hsv_img, cmap='hsv')
plt.title('Image HSV rgb2hsv')
mcolor_hsv = mcolor.rgb_to_hsv(pixels.copy())
fig.add_subplot(1, 3, 3)
plt.imshow(mcolor_hsv, cmap='hsv')
plt.title('Image HSV mcolor')
plt.show()
"""
Explanation: Example 2
End of explanation
"""
|
xpharry/Udacity-DLFoudation | tutorials/sentiment_network/Sentiment Classification - Project 4 Solution.ipynb | mit | def pretty_print_review_and_label(i):
print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
len(reviews)
reviews[0]
labels[0]
"""
Explanation: Sentiment Classification & How To "Frame Problems" for a Neural Network
by Andrew Trask
Twitter: @iamtrask
Blog: http://iamtrask.github.io
What You Should Already Know
neural networks, forward and back-propagation
stochastic gradient descent
mean squared error
and train/test splits
Where to Get Help if You Need it
Re-watch previous Udacity Lectures
Leverage the recommended Course Reading Material - Grokking Deep Learning (40% Off: traskud17)
Shoot me a tweet @iamtrask
Tutorial Outline:
Intro: The Importance of "Framing a Problem"
Curate a Dataset
Developing a "Predictive Theory"
PROJECT 1: Quick Theory Validation
Transforming Text to Numbers
PROJECT 2: Creating the Input/Output Data
Putting it all together in a Neural Network
PROJECT 3: Building our Neural Network
Understanding Neural Noise
PROJECT 4: Making Learning Faster by Reducing Noise
Analyzing Inefficiencies in our Network
PROJECT 5: Making our Network Train and Run Faster
Further Noise Reduction
PROJECT 6: Reducing Noise by Strategically Reducing the Vocabulary
Analysis: What's going on in the weights?
Lesson: Curate a Dataset
End of explanation
"""
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
"""
Explanation: Lesson: Develop a Predictive Theory
End of explanation
"""
from collections import Counter
import numpy as np
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
if(labels[i] == 'POSITIVE'):
for word in reviews[i].split(" "):
positive_counts[word] += 1
total_counts[word] += 1
else:
for word in reviews[i].split(" "):
negative_counts[word] += 1
total_counts[word] += 1
positive_counts.most_common()
pos_neg_ratios = Counter()
for term,cnt in list(total_counts.most_common()):
if(cnt > 100):
pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
pos_neg_ratios[term] = pos_neg_ratio
for word,ratio in pos_neg_ratios.most_common():
if(ratio > 1):
pos_neg_ratios[word] = np.log(ratio)
else:
pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
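# A quick sanity check (sketch): clearly positive words should have a large positive
# log-ratio and clearly negative words a large negative one.
print("excellent:", pos_neg_ratios["excellent"])
print("terrible: ", pos_neg_ratios["terrible"])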
"""
Explanation: Project 1: Quick Theory Validation
End of explanation
"""
from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
"""
Explanation: Transforming Text into Numbers
End of explanation
"""
vocab = set(total_counts.keys())
vocab_size = len(vocab)
print(vocab_size)
list(vocab)
import numpy as np
layer_0 = np.zeros((1,vocab_size))
layer_0
from IPython.display import Image
Image(filename='sentiment_network.png')
word2index = {}
for i,word in enumerate(vocab):
word2index[word] = i
word2index
def update_input_layer(review):
global layer_0
# clear out previous state, reset the layer to be all 0s
layer_0 *= 0
for word in review.split(" "):
layer_0[0][word2index[word]] += 1
update_input_layer(reviews[0])
layer_0
def get_target_for_label(label):
if(label == 'POSITIVE'):
return 1
else:
return 0
labels[0]
get_target_for_label(labels[0])
labels[1]
get_target_for_label(labels[1])
"""
Explanation: Project 2: Creating the Input/Output Data
End of explanation
"""
import time
import sys
import numpy as np
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
# set our random number generator
np.random.seed(1)
self.pre_process_data(reviews, labels)
self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
def pre_process_data(self, reviews, labels):
review_vocab = set()
for review in reviews:
for word in review.split(" "):
review_vocab.add(word)
self.review_vocab = list(review_vocab)
label_vocab = set()
for label in labels:
label_vocab.add(label)
self.label_vocab = list(label_vocab)
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
self.word2index = {}
for i, word in enumerate(self.review_vocab):
self.word2index[word] = i
self.label2index = {}
for i, label in enumerate(self.label_vocab):
self.label2index[label] = i
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.learning_rate = learning_rate
self.layer_0 = np.zeros((1,input_nodes))
def update_input_layer(self,review):
# clear out previous state, reset the layer to be all 0s
self.layer_0 *= 0
for word in review.split(" "):
if(word in self.word2index.keys()):
self.layer_0[0][self.word2index[word]] += 1
def get_target_for_label(self,label):
if(label == 'POSITIVE'):
return 1
else:
return 0
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self,output):
return output * (1 - output)
def train(self, training_reviews, training_labels):
assert(len(training_reviews) == len(training_labels))
correct_so_far = 0
start = time.time()
for i in range(len(training_reviews)):
review = training_reviews[i]
label = training_labels[i]
#### Implement the forward pass here ####
### Forward pass ###
# Input Layer
self.update_input_layer(review)
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
# TODO: Backpropagated error
layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
# TODO: Update the weights
self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step
if(np.abs(layer_2_error) < 0.5):
correct_so_far += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
if(i % 2500 == 0):
print("")
def test(self, testing_reviews, testing_labels):
correct = 0
start = time.time()
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
# Input Layer
self.update_input_layer(review.lower())
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
if(layer_2[0] > 0.5):
return "POSITIVE"
else:
return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
# evaluate our model before training (just to show how horrible it is)
mlp.test(reviews[-1000:],labels[-1000:])
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
"""
Explanation: Project 3: Building a Neural Network
Start with your neural network from the last chapter
3 layer neural network
no non-linearity in hidden layer
use our functions to create the training data
create a "pre_process_data" function to create vocabulary for our training data generating functions
modify "train" to train over the entire corpus
Where to Get Help if You Need it
Re-watch previous week's Udacity Lectures
Chapters 3-5 - Grokking Deep Learning - (40% Off: traskud17)
End of explanation
"""
from IPython.display import Image
Image(filename='sentiment_network.png')
def update_input_layer(review):
global layer_0
# clear out previous state, reset the layer to be all 0s
layer_0 *= 0
for word in review.split(" "):
layer_0[0][word2index[word]] += 1
update_input_layer(reviews[0])
layer_0
review_counter = Counter()
for word in reviews[0].split(" "):
review_counter[word] += 1
review_counter.most_common()
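# The counts above are dominated by frequent but neutral tokens (e.g. '', '.', 'the'),
# which carry little sentiment signal; a small sketch pairing each frequent token with
# its positive/negative log-ratio from Project 1 makes that explicit:
for word, cnt in review_counter.most_common(5):
    print("%-10r count=%3d  pos/neg log-ratio=%+.2f" % (word, cnt, pos_neg_ratios[word]))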
"""
Explanation: Understanding Neural Noise
End of explanation
"""
import time
import sys
import numpy as np
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
# set our random number generator
np.random.seed(1)
self.pre_process_data(reviews, labels)
self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
def pre_process_data(self, reviews, labels):
review_vocab = set()
for review in reviews:
for word in review.split(" "):
review_vocab.add(word)
self.review_vocab = list(review_vocab)
label_vocab = set()
for label in labels:
label_vocab.add(label)
self.label_vocab = list(label_vocab)
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
self.word2index = {}
for i, word in enumerate(self.review_vocab):
self.word2index[word] = i
self.label2index = {}
for i, label in enumerate(self.label_vocab):
self.label2index[label] = i
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.learning_rate = learning_rate
self.layer_0 = np.zeros((1,input_nodes))
def update_input_layer(self,review):
# clear out previous state, reset the layer to be all 0s
self.layer_0 *= 0
for word in review.split(" "):
if(word in self.word2index.keys()):
self.layer_0[0][self.word2index[word]] = 1
def get_target_for_label(self,label):
if(label == 'POSITIVE'):
return 1
else:
return 0
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self,output):
return output * (1 - output)
def train(self, training_reviews, training_labels):
assert(len(training_reviews) == len(training_labels))
correct_so_far = 0
start = time.time()
for i in range(len(training_reviews)):
review = training_reviews[i]
label = training_labels[i]
#### Implement the forward pass here ####
### Forward pass ###
# Input Layer
self.update_input_layer(review)
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
# TODO: Backpropagated error
layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
# TODO: Update the weights
self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step
if(np.abs(layer_2_error) < 0.5):
correct_so_far += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
if(i % 2500 == 0):
print("")
def test(self, testing_reviews, testing_labels):
correct = 0
start = time.time()
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if(pred == testing_labels[i]):
correct += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
def run(self, review):
# Input Layer
self.update_input_layer(review.lower())
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
if(layer_2[0] > 0.5):
return "POSITIVE"
else:
return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
# evaluate the trained model on the held-out reviews
mlp.test(reviews[-1000:],labels[-1000:])
"""
Explanation: Project 4: Reducing Noise in our Input Data
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.19/_downloads/05c57a644672d33707fd1264df7f5617/plot_time_frequency_global_field_power.ipynb | bsd-3-clause | # Authors: Denis A. Engemann <[email protected]>
# Stefan Appelhoff <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import somato
from mne.baseline import rescale
from mne.stats import bootstrap_confidence_interval
"""
Explanation: Explore event-related dynamics for specific frequency bands
The objective is to show you how to explore spectrally localized
effects. For this purpose we adapt the method described in [1]_ and use it on
the somato dataset. The idea is to track the band-limited temporal evolution
of spatial patterns by using the :term:Global Field Power(GFP) <GFP>.
We first bandpass filter the signals and then apply a Hilbert transform. To
reveal oscillatory activity the evoked response is then subtracted from every
single trial. Finally, we rectify the signals prior to averaging across trials
by taking the magnitude (absolute value) of the analytic signal.
Then the :term:GFP is computed as described in [2]_, using the sum of the
squares but without normalization by the rank.
Baselining is subsequently applied to make the :term:GFPs <GFP> comparable
between frequencies.
The procedure is then repeated for each frequency band of interest and
all :term:GFPs <GFP> are visualized. To estimate uncertainty, non-parametric
confidence intervals are computed as described in [3]_ across channels.
The advantage of this method over summarizing the Space x Time x Frequency
output of a Morlet Wavelet in frequency bands is relative speed and, more
importantly, the clear-cut comparability of the spectral decomposition (the
same type of filter is used across all bands).
We will use this dataset: somato-dataset
References
.. [1] Hari R. and Salmelin R. Human cortical oscillations: a neuromagnetic
view through the skull (1997). Trends in Neuroscience 20 (1),
pp. 44-49.
.. [2] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG signals,
vol. 108, 328-342, NeuroImage.
.. [3] Efron B. and Hastie T. Computer Age Statistical Inference (2016).
       Cambridge University Press, Chapter 11.2.
End of explanation
"""
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# let's explore some frequency bands
iter_freqs = [
('Theta', 4, 7),
('Alpha', 8, 12),
('Beta', 13, 25),
('Gamma', 30, 45)
]
"""
Explanation: Set parameters
End of explanation
"""
# set epoching parameters
event_id, tmin, tmax = 1, -1., 3.
baseline = None
# get the header to extract events
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
frequency_map = list()
for band, fmin, fmax in iter_freqs:
# (re)load the data to save memory
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.pick_types(meg='grad', eog=True) # we just look at gradiometers
# bandpass filter
raw.filter(fmin, fmax, n_jobs=1, # use more jobs to speed up.
l_trans_bandwidth=1, # make sure filter params are the same
h_trans_bandwidth=1) # in each band and skip "auto" option.
# epoch
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=baseline,
reject=dict(grad=4000e-13, eog=350e-6),
preload=True)
# remove evoked response
epochs.subtract_evoked()
# get analytic signal (envelope)
epochs.apply_hilbert(envelope=True)
frequency_map.append(((band, fmin, fmax), epochs.average()))
del epochs
del raw
"""
Explanation: We create average power time courses for each frequency band
End of explanation
"""
# Helper function for plotting spread
def stat_fun(x):
"""Return sum of squares."""
return np.sum(x ** 2, axis=0)
# Plot
fig, axes = plt.subplots(4, 1, figsize=(10, 7), sharex=True, sharey=True)
colors = plt.get_cmap('winter_r')(np.linspace(0, 1, 4))
for ((freq_name, fmin, fmax), average), color, ax in zip(
frequency_map, colors, axes.ravel()[::-1]):
times = average.times * 1e3
gfp = np.sum(average.data ** 2, axis=0)
gfp = mne.baseline.rescale(gfp, times, baseline=(None, 0))
ax.plot(times, gfp, label=freq_name, color=color, linewidth=2.5)
ax.axhline(0, linestyle='--', color='grey', linewidth=2)
ci_low, ci_up = bootstrap_confidence_interval(average.data, random_state=0,
stat_fun=stat_fun)
ci_low = rescale(ci_low, average.times, baseline=(None, 0))
ci_up = rescale(ci_up, average.times, baseline=(None, 0))
ax.fill_between(times, gfp + ci_up, gfp - ci_low, color=color, alpha=0.3)
ax.grid(True)
ax.set_ylabel('GFP')
ax.annotate('%s (%d-%dHz)' % (freq_name, fmin, fmax),
xy=(0.95, 0.8),
horizontalalignment='right',
xycoords='axes fraction')
ax.set_xlim(-1000, 3000)
axes.ravel()[-1].set_xlabel('Time [ms]')
"""
Explanation: Now we can compute the Global Field Power
We can track the emergence of spatial patterns compared to baseline
for each frequency band, with a bootstrapped confidence interval.
We see dominant responses in the Alpha and Beta bands.
End of explanation
"""
|
kubeflow/examples | jpx-tokyo-stock-exchange-kaggle-competition/jpx-tokyo-stock-exchange-prediction-kale.ipynb | apache-2.0 | !pip install -r requirements.txt --user --quiet
"""
Explanation: JPX Tokyo Stock Exchange Kale Pipeline
In this Kaggle competition
Japan Exchange Group, Inc. (JPX) is a holding company operating one of the largest stock exchanges in the world, Tokyo Stock Exchange (TSE), and derivatives exchanges Osaka Exchange (OSE) and Tokyo Commodity Exchange (TOCOM). JPX is hosting this competition and is supported by AI technology company AlpacaJapan Co.,Ltd.
In this competition, you will model real future returns of around 2,000 stocks. The competition will involve building portfolios from the stocks eligible for predictions. The stocks are ranked from highest to lowest expected returns and they are evaluated on the difference in returns between the top and bottom 200 stocks.
End of explanation
"""
import sys, os, subprocess
from tqdm import tqdm
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import zipfile
import joblib
from lightgbm import LGBMRegressor
from sklearn.metrics import mean_squared_error
pd.set_option('display.max_columns', 500)
"""
Explanation: Imports
In this section we import the packages we need for this example. Make it a habit to gather your imports in a single place. It will make your life easier if you are going to transform this notebook into a Kubeflow pipeline using Kale.
End of explanation
"""
# Hyper-parameters
LR = 0.379687157316759
N_EST = 100
"""
Explanation: Project hyper-parameters
In this cell, we define the different hyper-parameters. Defining them in one place makes it easier to experiment with their values and also facilitates the execution of HP Tuning experiments using Kale and Katib.
End of explanation
"""
np.random.seed(2022)
"""
Explanation: Set the random seed for reproducibility.
End of explanation
"""
# setup kaggle environment for data download
# set kaggle.json path
os.environ['KAGGLE_CONFIG_DIR'] = "/home/jovyan/examples/jpx-tokyo-stock-exchange-kaggle-competition"
# grant owner-only read/write permission to .kaggle/kaggle.json
subprocess.run(["chmod","600", f"{os.environ['KAGGLE_CONFIG_DIR']}/kaggle.json"])
# download kaggle's jpx-tokyo-stock-exchange-prediction data
subprocess.run(["kaggle","competitions", "download", "-c", "jpx-tokyo-stock-exchange-prediction"])
# path to download to
data_path = 'data'
# extract jpx-tokyo-stock-exchange-prediction.zip to data_path
with zipfile.ZipFile("jpx-tokyo-stock-exchange-prediction.zip","r") as zip_ref:
zip_ref.extractall(data_path)
# read train_files/stock_prices.csv
df_prices = pd.read_csv(f"{data_path}/train_files/stock_prices.csv", parse_dates=['Date'])
df_prices['Date'].max()
df_prices.tail(3)
# lets check data dimensions
df_prices.shape
df_prices.info()
# check total nan values per column
df_prices.isna().sum()
"""
Explanation: Download and load the dataset
In this section, we download the data from kaggle to get it in a ready-to-use form by the model.
First, let us load and analyze the data.
The data are in csv format, thus, we use the handy read_csv pandas method. There is one train data set and two test sets (one public and one private).
End of explanation
"""
# sort data by 'Date' and 'SecuritiesCode'
df_prices.sort_values(by=['Date','SecuritiesCode'], inplace=True)
# count total trading stocks per day
idcount = df_prices.groupby("Date")["SecuritiesCode"].count().reset_index()
idcount
plt.figure(figsize=(10, 5))
plt.plot(idcount["Date"],idcount["SecuritiesCode"])
plt.axvline(x=['2021-01-01'], color='blue', label='2021-01-01')
plt.axvline(x=['2020-06-01'], color='red', label='2020-06-01')
plt.legend()
plt.show()
idcount[idcount['SecuritiesCode'] >= 2000]
idcount[idcount['SecuritiesCode'] >= 2000]['SecuritiesCode'].sum()
# filter out data with less than 2000 stock counts in a day
# dates before ‘2020-12-23’ all have stock counts less than 2000
# This is done to work with consistent data
df_prices = df_prices[(df_prices["Date"]>="2020-12-23")]
df_prices = df_prices.reset_index(drop=True)
df_prices.head()
df_prices.columns
# calculate z-scores of the price and volume columns of df_prices
z_scores = stats.zscore(df_prices[['Open', 'High', 'Low', 'Close','Volume']], nan_policy='omit')
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
df_zscore = df_prices[filtered_entries]
df_zscore = df_zscore.reset_index(drop=True)
"""
Explanation: Transform Data
End of explanation
"""
def feat_eng(df, features):
for i in tqdm(range(1, 4)):
# creating lag features
tmp = df[features].shift(i)
tmp.columns = [c + f'_next_shift_{i}' for c in tmp.columns]
df = pd.concat([df, tmp], sort=False, axis=1)
for i in tqdm(range(1, 4)):
df[f'weighted_vol_price_{i}'] = np.log(df[f'Volume_next_shift_{i}'] * df[[col for col in df if col.endswith(f'next_shift_{i}')][:-1]].apply(np.mean, axis=1))
# feature engineering
df['weighted_vol_price'] = np.log(df['Volume'] * (np.mean(df[features[:-1]], axis=1)))
df['BOP'] = (df['Open']-df['Close'])/(df['High']-df['Low'])
df['HL'] = df['High'] - df['Low']
df['OC'] = df['Close'] - df['Open']
df['OHLCstd'] = df[['Open','Close','High','Low']].std(axis=1)
feats = df.select_dtypes(include=float).columns
df[feats] = df[feats].apply(np.log)
# replace inf with nan
df.replace([np.inf, -np.inf], np.nan, inplace=True)
# datetime features
df['Date'] = pd.to_datetime(df['Date'])
df['Day'] = df['Date'].dt.weekday.astype(np.int32)
df["dayofyear"] = df['Date'].dt.dayofyear
df["is_weekend"] = df['Day'].isin([5, 6])
df["weekofyear"] = df['Date'].dt.weekofyear
df["month"] = df['Date'].dt.month
df["season"] = (df["month"]%12 + 3)//3
# fill nan values
df = df.fillna(0)
return df
new_feats = feat_eng(df_zscore, ['High', 'Low', 'Open', 'Close', 'Volume'])
new_feats.shape
new_feats['Target'] = df_zscore['Target']
new_feats.head(7)
new_feats.columns
"""
Explanation: Feature Engineering
End of explanation
"""
# columns to be used for modelling.
feats = ['Date','SecuritiesCode', 'Open', 'High', 'Low', 'Close', 'Volume',
'weighted_vol_price_1', 'weighted_vol_price_2', 'weighted_vol_price_3',
'weighted_vol_price', 'BOP', 'HL', 'OC', 'OHLCstd', 'Day', 'dayofyear',
'is_weekend', 'weekofyear', 'month', 'season']
# transform date to int
new_feats['Date'] = new_feats['Date'].dt.strftime("%Y%m%d").astype(int)
# split data into valid for validation and train for model training
valid = new_feats[(new_feats['Date'] >= 20211111)].copy()
train = new_feats[(new_feats['Date'] < 20211111)].copy()
train.shape, valid.shape
# model parameter
params = {
'n_estimators': int(N_EST),
'learning_rate': float(LR),
'random_state': 2022,
'verbose' : 2}
# model initialization
model = LGBMRegressor(**params)
X = train[feats]
y = train["Target"]
X_test = valid[feats]
y_test = valid["Target"]
# fitting
model.fit(X, y, verbose=False, eval_set=(X_test, y_test))
"""
Explanation: Modelling
End of explanation
"""
# model prediction
preds = model.predict(X_test)
# model evaluation
rmse = np.round(mean_squared_error(preds, y_test)**0.5, 5)
print(rmse)
"""
Explanation: Evaluation and Prediction
End of explanation
"""
sys.path.insert(0, 'helper-files')
from local_api import local_api
myapi = local_api('data/supplemental_files')
env = myapi.make_env()
iter_test = env.iter_test()
for (prices, options, financials, trades, secondary_prices, sample_prediction) in iter_test:
prices = feat_eng(prices, ['High', 'Low', 'Open', 'Close', 'Volume'])
prices['Date'] = prices['Date'].dt.strftime("%Y%m%d").astype(int)
prices["Target"] = model.predict(prices[feats])
if prices["Volume"].min()==0:
sample_prediction["Prediction"] = 0
else:
sample_prediction["Prediction"] = prices["Target"]/prices["Volume"]
sample_prediction["Prediction"] = prices["Target"]
sample_prediction.sort_values(by="Prediction", ascending=False, inplace=True)
sample_prediction['Rank'] = np.arange(0,2000)
sample_prediction.sort_values(by = "SecuritiesCode", ascending=True, inplace=True)
submission = sample_prediction[["Date","SecuritiesCode","Rank"]]
env.predict(submission)
print(env.score())
submission.head()
"""
Explanation: Make submission
End of explanation
"""
|
gojomo/gensim | docs/notebooks/soft_cosine_benchmark.ipynb | lgpl-2.1 | !git rev-parse HEAD
from copy import deepcopy
from datetime import timedelta
from itertools import product
import logging
from math import floor, ceil, log10
import pickle
from random import sample, seed, shuffle
from time import time
import numpy as np
import pandas as pd
from tqdm import tqdm_notebook
def tqdm(iterable, total=None, desc=None):
if total is None:
total = len(iterable)
for num_done, element in enumerate(tqdm_notebook(iterable, total=total)):
logger.info("%s: %d / %d", desc, num_done, total)
yield element
from gensim.corpora import Dictionary
import gensim.downloader as api
from gensim.similarities.index import AnnoyIndexer
from gensim.similarities import SparseTermSimilarityMatrix
from gensim.similarities import UniformTermSimilarityIndex
from gensim.similarities import LevenshteinSimilarityIndex
from gensim.models import WordEmbeddingSimilarityIndex
from gensim.utils import simple_preprocess
RANDOM_SEED = 12345
logger = logging.getLogger()
fhandler = logging.FileHandler(filename='matrix_speed.log', mode='a')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fhandler.setFormatter(formatter)
logger.addHandler(fhandler)
logger.setLevel(logging.INFO)
pd.set_option('display.max_rows', None, 'display.max_seq_items', None)
"""Repeatedly run a benchmark callable given various configurations and
get a list of results.
Return a list of results of repeatedly running a benchmark callable.
Parameters
----------
benchmark : callable tuple -> dict
A benchmark callable that accepts a configuration and returns results.
configurations : iterable of tuple
An iterable of configurations that are used for calling the benchmark function.
results_filename : str
A filename of a file that will be used to persistently store the results using
pickle. If the file exists, then the function will load the stored results
instead of calling the benchmark callable.
Returns
-------
iterable of tuple
The return values of the individual invocations of the benchmark callable.
"""
def benchmark_results(benchmark, configurations, results_filename):
try:
with open(results_filename, "rb") as file:
results = pickle.load(file)
except IOError:
configurations = list(configurations)
shuffle(configurations)
results = list(tqdm(
(benchmark(configuration) for configuration in configurations),
total=len(configurations), desc="benchmark"))
with open(results_filename, "wb") as file:
pickle.dump(results, file)
return results
"""
Explanation: Benchmark: Implement Levenshtein term similarity matrix and fast SCM between corpora (RaRe-Technologies/gensim PR #2016)
End of explanation
"""
full_model = api.load("word2vec-google-news-300")
try:
full_dictionary = Dictionary.load("matrix_speed.dictionary")
except IOError:
full_dictionary = Dictionary([[term] for term in full_model.vocab.keys()])
full_dictionary.save("matrix_speed.dictionary")
"""
Explanation: Implement Levenshtein term similarity matrix
In Gensim PR #1827, we added a base implementation of the soft cosine measure (SCM). The base implementation would create term similarity matrices using a single complex procedure. In the Gensim PR #2016, we split the procedure into:
TermSimilarityIndex builder classes that produce the $k$ most similar terms for a given term $t$ that are distinct from $t$ along with the term similarities, and
the SparseTermSimilarityMatrix director class that constructs term similarity matrices and consumes term similarities produced by TermSimilarityIndex instances.
One of the benefits of this separation is that we can easily measure the speed at which a TermSimilarityIndex builder class produces term similarities and compare this speed with the speed at which the SparseTermSimilarityMatrix director class consumes term similarities. This allows us to see which of the classes is the bottleneck that slows down the construction of term similarity matrices.
In this notebook, we measure all the currently available builder and director classes. For the measurements, we use the Google News word embeddings distributed with the C implementation of Word2Vec. From the word embeddings, we will derive a dictionary of 2.01M terms.
End of explanation
"""
def benchmark(configuration):
dictionary, nonzero_limit, symmetric, positive_definite, repetition = configuration
index = UniformTermSimilarityIndex(dictionary)
start_time = time()
matrix = SparseTermSimilarityMatrix(
index, dictionary, nonzero_limit=nonzero_limit, symmetric=symmetric,
positive_definite=positive_definite, dtype=np.float16).matrix
end_time = time()
duration = end_time - start_time
return {
"dictionary_size": len(dictionary),
"nonzero_limit": nonzero_limit,
"matrix_nonzero": matrix.nnz,
"repetition": repetition,
"symmetric": symmetric,
"positive_definite": positive_definite,
"duration": duration, }
dictionary_sizes = [10**k for k in range(3, int(ceil(log10(len(full_dictionary)))))]
seed(RANDOM_SEED)
dictionaries = []
for size in tqdm(dictionary_sizes, desc="dictionaries"):
dictionary = Dictionary([sample(list(full_dictionary.values()), size)])
dictionaries.append(dictionary)
dictionaries.append(full_dictionary)
nonzero_limits = [1, 10, 100]
symmetry = (True, False)
positive_definiteness = (True, False)
repetitions = range(10)
configurations = product(dictionaries, nonzero_limits, symmetry, positive_definiteness, repetitions)
results = benchmark_results(benchmark, configurations, "matrix_speed.director_results")
"""
Explanation: Director class benchmark
SparseTermSimilarityMatrix
First, we measure the speed at which the SparseTermSimilarityMatrix director class consumes term similarities.
End of explanation
"""
df = pd.DataFrame(results)
df["consumption_speed"] = df.dictionary_size * df.nonzero_limit / df.duration
df = df.groupby(["dictionary_size", "nonzero_limit", "symmetric", "positive_definite"])
def display(df):
df["duration"] = [timedelta(0, duration) for duration in df["duration"]]
df["matrix_nonzero"] = [int(nonzero) for nonzero in df["matrix_nonzero"]]
df["consumption_speed"] = ["%.02f Kword pairs / s" % (speed / 1000) for speed in df["consumption_speed"]]
return df
display(df.mean()).loc[
[10000, len(full_dictionary)], :, :].loc[
:, ["duration", "matrix_nonzero", "consumption_speed"]]
display(df.apply(lambda x: (x - x.mean()).std())).loc[
[10000, len(full_dictionary)], :, :].loc[
:, ["duration", "matrix_nonzero", "consumption_speed"]]
"""
Explanation: The following tables show how long it takes to construct a term similarity matrix (the duration column), how many nonzero elements there are in the matrix (the matrix_nonzero column) and the mean term similarity consumption speed (the consumption_speed column) as we vary the dictionary size (the dictionary_size column), the maximum number of nonzero elements outside the diagonal in every column of the matrix (the nonzero_limit column), the matrix symmetry constraint (the symmetric column), and the matrix positive definiteness constraint (the positive_definite column). Ten independent measurements were taken. The top table shows the mean values and the bottom table shows the standard deviations.
We can see that the symmetry and positive definiteness constraints severely limit the number of nonzero elements in the resulting matrix. This in turn increases the consumption speed, since we end up throwing away most of the elements that we consume. The effects of the dictionary size on the mean term similarity consumption speed are minor to none.
End of explanation
"""
def benchmark(configuration):
dictionary, nonzero_limit, repetition = configuration
start_time = time()
index = UniformTermSimilarityIndex(dictionary)
end_time = time()
constructor_duration = end_time - start_time
start_time = time()
for term in dictionary.values():
for _j, _k in zip(index.most_similar(term, topn=nonzero_limit), range(nonzero_limit)):
pass
end_time = time()
production_duration = end_time - start_time
return {
"dictionary_size": len(dictionary),
"nonzero_limit": nonzero_limit,
"repetition": repetition,
"constructor_duration": constructor_duration,
"production_duration": production_duration, }
nonzero_limits = [1, 10, 100, 1000]
configurations = product(dictionaries, nonzero_limits, repetitions)
results = benchmark_results(benchmark, configurations, "matrix_speed.builder_results.uniform")
"""
Explanation: Builder class benchmark
UniformTermSimilarityIndex
First, we measure the speed at which the UniformTermSimilarityIndex builder class produces term similarities. UniformTermSimilarityIndex is a dummy class that just generates a sequence of constants. It produces many more term similarities per second than the SparseTermSimilarityMatrix is capable of consuming, and its results will serve as an upper limit.
End of explanation
"""
df = pd.DataFrame(results)
df["processing_speed"] = df.dictionary_size ** 2 / df.production_duration
df["production_speed"] = df.dictionary_size * df.nonzero_limit / df.production_duration
df = df.groupby(["dictionary_size", "nonzero_limit"])
def display(df):
df["constructor_duration"] = [timedelta(0, duration) for duration in df["constructor_duration"]]
df["production_duration"] = [timedelta(0, duration) for duration in df["production_duration"]]
df["processing_speed"] = ["%.02f Kword pairs / s" % (speed / 1000) for speed in df["processing_speed"]]
df["production_speed"] = ["%.02f Kword pairs / s" % (speed / 1000) for speed in df["production_speed"]]
return df
display(df.mean()).loc[
[1000, len(full_dictionary)], :, :].loc[
:, ["production_duration", "production_speed"]]
display(df.apply(lambda x: (x - x.mean()).std())).loc[
[1000, len(full_dictionary)], :, :].loc[
:, ["production_duration", "production_speed"]]
"""
Explanation: The following tables show how long it takes to retrieve the most similar terms for all terms in a dictionary (the production_duration column) and the mean term similarity production speed (the production_speed column) as we vary the dictionary size (the dictionary_size column), and the maximum number of most similar terms that will be retrieved (the nonzero_limit column). Ten independent measurements were taken. The top table shows the mean values and the bottom table shows the standard deviations.
The production_speed is proportional to nonzero_limit.
End of explanation
"""
def benchmark(configuration):
dictionary, nonzero_limit, query_terms, repetition = configuration
start_time = time()
index = LevenshteinSimilarityIndex(dictionary)
end_time = time()
constructor_duration = end_time - start_time
start_time = time()
for term in query_terms:
for _j, _k in zip(index.most_similar(term, topn=nonzero_limit), range(nonzero_limit)):
pass
end_time = time()
production_duration = end_time - start_time
return {
"dictionary_size": len(dictionary),
"mean_query_term_length": np.mean([len(term) for term in query_terms]),
"nonzero_limit": nonzero_limit,
"repetition": repetition,
"constructor_duration": constructor_duration,
"production_duration": production_duration, }
nonzero_limits = [1, 10, 100]
seed(RANDOM_SEED)
min_dictionary = sorted((len(dictionary), dictionary) for dictionary in dictionaries)[0][1]
query_terms = sample(list(min_dictionary.values()), 10)
configurations = product(dictionaries, nonzero_limits, [query_terms], repetitions)
results = benchmark_results(benchmark, configurations, "matrix_speed.builder_results.levenshtein")
"""
Explanation: LevenshteinSimilarityIndex
Next, we measure the speed at which the LevenshteinSimilarityIndex builder class produces term similarities. LevenshteinSimilarityIndex is currently just a naïve implementation that produces far fewer term similarities per second than the SparseTermSimilarityMatrix class is capable of consuming.
End of explanation
"""
df = pd.DataFrame(results)
df["processing_speed"] = df.dictionary_size * len(query_terms) / df.production_duration
df["production_speed"] = df.nonzero_limit * len(query_terms) / df.production_duration
df = df.groupby(["dictionary_size", "nonzero_limit"])
def display(df):
df["constructor_duration"] = [timedelta(0, duration) for duration in df["constructor_duration"]]
df["production_duration"] = [timedelta(0, duration) for duration in df["production_duration"]]
df["processing_speed"] = ["%.02f Kword pairs / s" % (speed / 1000) for speed in df["processing_speed"]]
df["production_speed"] = ["%.02f word pairs / s" % speed for speed in df["production_speed"]]
return df
display(df.mean()).loc[
[1000, 1000000, len(full_dictionary)], :].loc[
:, ["production_duration", "production_speed", "processing_speed"]]
display(df.apply(lambda x: (x - x.mean()).std())).loc[
[1000, 1000000, len(full_dictionary)], :].loc[
:, ["production_duration", "production_speed", "processing_speed"]]
"""
Explanation: The following tables show how long it takes to retrieve the most similar terms for ten randomly sampled terms from a dictionary (the production_duration column), the mean term similarity production speed (the production_speed column) and the mean term similarity processing speed (the processing_speed column) as we vary the dictionary size (the dictionary_size column), and the maximum number of most similar terms that will be retrieved (the nonzero_limit column). Ten independent measurements were taken. The top table shows the mean values and the bottom table shows the standard deviations.
The production_speed is proportional to nonzero_limit / dictionary_size. The processing_speed is constant.
End of explanation
"""
def benchmark(configuration):
(model, dictionary), nonzero_limit, annoy_n_trees, query_terms, repetition = configuration
use_annoy = annoy_n_trees > 0
model.init_sims()
start_time = time()
if use_annoy:
annoy = AnnoyIndexer(model, annoy_n_trees)
kwargs = {"indexer": annoy}
else:
kwargs = {}
index = WordEmbeddingSimilarityIndex(model, kwargs=kwargs)
end_time = time()
constructor_duration = end_time - start_time
start_time = time()
for term in query_terms:
for _j, _k in zip(index.most_similar(term, topn=nonzero_limit), range(nonzero_limit)):
pass
end_time = time()
production_duration = end_time - start_time
return {
"dictionary_size": len(dictionary),
"mean_query_term_length": np.mean([len(term) for term in query_terms]),
"nonzero_limit": nonzero_limit,
"use_annoy": use_annoy,
"annoy_n_trees": annoy_n_trees,
"repetition": repetition,
"constructor_duration": constructor_duration,
"production_duration": production_duration, }
models = []
for dictionary in tqdm(dictionaries, desc="models"):
if dictionary == full_dictionary:
models.append(full_model)
continue
model = full_model.__class__(full_model.vector_size)
model.vocab = {word: deepcopy(full_model.vocab[word]) for word in dictionary.values()}
model.index2entity = []
vector_indices = []
for index, word in enumerate(full_model.index2entity):
if word in model.vocab.keys():
model.index2entity.append(word)
model.vocab[word].index = len(vector_indices)
vector_indices.append(index)
model.vectors = full_model.vectors[vector_indices]
models.append(model)
annoy_n_trees = [0] + [10**k for k in range(3)]
seed(RANDOM_SEED)
query_terms = sample(list(min_dictionary.values()), 1000)
configurations = product(zip(models, dictionaries), nonzero_limits, annoy_n_trees, [query_terms], repetitions)
results = benchmark_results(benchmark, configurations, "matrix_speed.builder_results.wordembeddings")
"""
Explanation: WordEmbeddingSimilarityIndex
Lastly, we measure the speed at which the WordEmbeddingSimilarityIndex builder class constructs an instance and produces term similarities. Gensim currently supports slow and precise nearest neighbor search, and also approximate nearest neighbor search using ANNOY. We evaluate both options.
End of explanation
"""
df = pd.DataFrame(results)
df["processing_speed"] = df.dictionary_size * len(query_terms) / df.production_duration
df["production_speed"] = df.nonzero_limit * len(query_terms) / df.production_duration
df = df.groupby(["dictionary_size", "nonzero_limit", "annoy_n_trees"])
def display(df):
df["constructor_duration"] = [timedelta(0, duration) for duration in df["constructor_duration"]]
df["production_duration"] = [timedelta(0, duration) for duration in df["production_duration"]]
df["processing_speed"] = ["%.02f Kword pairs / s" % (speed / 1000) for speed in df["processing_speed"]]
df["production_speed"] = ["%.02f Kword pairs / s" % (speed / 1000) for speed in df["production_speed"]]
return df
display(df.mean()).loc[
[1000000, len(full_dictionary)], [1, 100], [0, 1, 100]].loc[
:, ["constructor_duration", "production_duration", "production_speed", "processing_speed"]]
display(df.apply(lambda x: (x - x.mean()).std())).loc[
[1000000, len(full_dictionary)], [1, 100], [0, 1, 100]].loc[
:, ["constructor_duration", "production_duration", "production_speed", "processing_speed"]]
"""
Explanation: The following tables show how long it takes to construct an ANNOY index and the builder class instance (the constructor_duration column), how long it takes to retrieve the most similar terms for 1,000 randomly sampled terms from a dictionary (the production_duration column), the mean term similarity production speed (the production_speed column) and the mean term similarity processing speed (the processing_speed column) as we vary the dictionary size (the dictionary_size column), the maximum number of most similar terms that will be retrieved (the nonzero_limit column), and the number of constructed ANNOY trees (the annoy_n_trees column). Ten independent measurements were taken. The top table shows the mean values and the bottom table shows the standard deviations.
If we do not use ANNOY (annoy_n_trees${}=0$), then production_speed is proportional to nonzero_limit / dictionary_size.
If we do use ANNOY (annoy_n_trees${}>0$), then production_speed is proportional to nonzero_limit / (annoy_n_trees)${}^{1/2}$.
End of explanation
"""
full_model = api.load("word2vec-google-news-300")
try:
with open("matrix_speed.corpus", "rb") as file:
full_corpus = pickle.load(file)
except IOError:
original_corpus = list(tqdm(api.load("wiki-english-20171001"), desc="original_corpus", total=4924894))
seed(RANDOM_SEED)
full_corpus = [
simple_preprocess(u'\n'.join(article["section_texts"]))
for article in tqdm(sample(original_corpus, 10**5), desc="full_corpus", total=10**5)]
del original_corpus
with open("matrix_speed.corpus", "wb") as file:
pickle.dump(full_corpus, file)
try:
full_dictionary = Dictionary.load("matrix_speed.dictionary")
except IOError:
full_dictionary = Dictionary([[term] for term in full_model.vocab.keys()])
full_dictionary.save("matrix_speed.dictionary")
"""
Explanation: Implement fast SCM between corpora
In Gensim PR #1827, we added a base implementation of the soft cosine measure (SCM). The base implementation would compute SCM between single documents using the softcossim function. In the Gensim PR #2016, we introduced the SparseTermSimilarityMatrix.inner_product method, which computes SCM not only between single documents, but also between a document and a corpus, and between two corpora.
For the measurements, we use the Google News word embeddings distributed with the C implementation of Word2Vec. From the word embeddings, we will derive a dictionary of 2.01m terms. As a corpus, we will use a random sample of 100K articles from the 4.92m English Wikipedia articles.
End of explanation
"""
def benchmark(configuration):
(matrix, dictionary, nonzero_limit), corpus, normalized, repetition = configuration
corpus_size = len(corpus)
corpus = [dictionary.doc2bow(doc) for doc in corpus]
corpus = [vec for vec in corpus if len(vec) > 0]
start_time = time()
for vec1 in corpus:
for vec2 in corpus:
matrix.inner_product(vec1, vec2, normalized=normalized)
end_time = time()
duration = end_time - start_time
return {
"dictionary_size": matrix.matrix.shape[0],
"matrix_nonzero": matrix.matrix.nnz,
"nonzero_limit": nonzero_limit,
"normalized": normalized,
"corpus_size": corpus_size,
"corpus_actual_size": len(corpus),
"corpus_nonzero": sum(len(vec) for vec in corpus),
"mean_document_length": np.mean([len(doc) for doc in corpus]),
"repetition": repetition,
"duration": duration, }
seed(RANDOM_SEED)
dictionary_sizes = [1000, 100000]
dictionaries = []
for size in tqdm(dictionary_sizes, desc="dictionaries"):
dictionary = Dictionary([sample(list(full_dictionary.values()), size)])
dictionaries.append(dictionary)
min_dictionary = sorted((len(dictionary), dictionary) for dictionary in dictionaries)[0][1]
corpus_sizes = [100, 1000]
corpora = []
for size in tqdm(corpus_sizes, desc="corpora"):
corpus = sample(full_corpus, size)
corpora.append(corpus)
models = []
for dictionary in tqdm(dictionaries, desc="models"):
if dictionary == full_dictionary:
models.append(full_model)
continue
model = full_model.__class__(full_model.vector_size)
model.vocab = {word: deepcopy(full_model.vocab[word]) for word in dictionary.values()}
model.index2entity = []
vector_indices = []
for index, word in enumerate(full_model.index2entity):
if word in model.vocab.keys():
model.index2entity.append(word)
model.vocab[word].index = len(vector_indices)
vector_indices.append(index)
model.vectors = full_model.vectors[vector_indices]
models.append(model)
nonzero_limits = [1, 10, 100]
matrices = []
for (model, dictionary), nonzero_limit in tqdm(
list(product(zip(models, dictionaries), nonzero_limits)), desc="matrices"):
annoy = AnnoyIndexer(model, 1)
index = WordEmbeddingSimilarityIndex(model, kwargs={"indexer": annoy})
matrix = SparseTermSimilarityMatrix(index, dictionary, nonzero_limit=nonzero_limit)
matrices.append((matrix, dictionary, nonzero_limit))
del annoy
normalization = (True, False)
repetitions = range(10)
configurations = product(matrices, corpora, normalization, repetitions)
results = benchmark_results(benchmark, configurations, "matrix_speed.inner-product_results.doc_doc")
"""
Explanation: SCM between two documents
First, we measure the speed at which the inner_product method produces term similarities between single documents.
End of explanation
"""
df = pd.DataFrame(results)
df["speed"] = df.corpus_actual_size**2 / df.duration
del df["corpus_actual_size"]
df = df.groupby(["dictionary_size", "corpus_size", "nonzero_limit", "normalized"])
def display(df):
df["duration"] = [timedelta(0, duration) for duration in df["duration"]]
df["speed"] = ["%.02f Kdoc pairs / s" % (speed / 1000) for speed in df["speed"]]
return df
display(df.mean()).loc[
[1000, 100000], :, [1, 100], :].loc[
:, ["duration", "corpus_nonzero", "matrix_nonzero", "speed"]]
display(df.apply(lambda x: (x - x.mean()).std())).loc[
[1000, 100000], :, [1, 100], :].loc[
:, ["duration", "corpus_nonzero", "matrix_nonzero", "speed"]]
"""
Explanation: The following tables show how long it takes to compute the inner_product between all document vectors in a corpus (the duration column), how many nonzero elements there are in a corpus matrix (the corpus_nonzero column), how many nonzero elements there are in a term similarity matrix (the matrix_nonzero column) and the mean document similarity production speed (the speed column) as we vary the dictionary size (the dictionary_size column), the size of the corpus (the corpus_size column), the maximum number of nonzero elements in a single column of the matrix (the nonzero_limit column), and whether the inner products are normalized (the normalized column). Ten independent measurements were taken. The top table shows the mean values and the bottom table shows the standard deviations.
The speed is proportional to the square of the number of unique terms shared by the two document vectors. In our scenario as well as the standard IR scenario, this means speed is constant. Computing a normalized inner product (normalized${}={}$True) results in a constant speed decrease.
End of explanation
"""
def benchmark(configuration):
(matrix, dictionary, nonzero_limit), corpus, normalized, repetition = configuration
corpus_size = len(corpus)
corpus = [dictionary.doc2bow(doc) for doc in corpus if doc]
start_time = time()
for vec in corpus:
matrix.inner_product(vec, corpus, normalized=normalized)
end_time = time()
duration = end_time - start_time
return {
"dictionary_size": matrix.matrix.shape[0],
"matrix_nonzero": matrix.matrix.nnz,
"nonzero_limit": nonzero_limit,
"normalized": normalized,
"corpus_size": corpus_size,
"corpus_actual_size": len(corpus),
"corpus_nonzero": sum(len(vec) for vec in corpus),
"mean_document_length": np.mean([len(doc) for doc in corpus]),
"repetition": repetition,
"duration": duration, }
configurations = product(matrices, corpora, normalization, repetitions)
results = benchmark_results(benchmark, configurations, "matrix_speed.inner-product_results.doc_corpus")
"""
Explanation: SCM between a document and a corpus
Next, we measure the speed at which the inner_product method produces term similarities between documents and a corpus.
End of explanation
"""
df = pd.DataFrame(results)
df["speed"] = df.corpus_actual_size**2 / df.duration
del df["corpus_actual_size"]
df = df.groupby(["dictionary_size", "corpus_size", "nonzero_limit", "normalized"])
def display(df):
df["duration"] = [timedelta(0, duration) for duration in df["duration"]]
df["speed"] = ["%.02f Kdoc pairs / s" % (speed / 1000) for speed in df["speed"]]
return df
display(df.mean()).loc[
[1000, 100000], :, [1, 100], :].loc[
:, ["duration", "corpus_nonzero", "matrix_nonzero", "speed"]]
display(df.apply(lambda x: (x - x.mean()).std())).loc[
[1000, 100000], :, [1, 100], :].loc[
:, ["duration", "corpus_nonzero", "matrix_nonzero", "speed"]]
"""
Explanation: The speed is inversely proportional to matrix_nonzero. Computing a normalized inner product (normalized${}={}$True) results in a constant speed decrease.
End of explanation
"""
def benchmark(configuration):
(matrix, dictionary, nonzero_limit), corpus, normalized, repetition = configuration
corpus_size = len(corpus)
corpus = [dictionary.doc2bow(doc) for doc in corpus]
corpus = [vec for vec in corpus if len(vec) > 0]
start_time = time()
matrix.inner_product(corpus, corpus, normalized=normalized)
end_time = time()
duration = end_time - start_time
return {
"dictionary_size": matrix.matrix.shape[0],
"matrix_nonzero": matrix.matrix.nnz,
"nonzero_limit": nonzero_limit,
"normalized": normalized,
"corpus_size": corpus_size,
"corpus_actual_size": len(corpus),
"corpus_nonzero": sum(len(vec) for vec in corpus),
"mean_document_length": np.mean([len(doc) for doc in corpus]),
"repetition": repetition,
"duration": duration, }
nonzero_limits = [1000]
dense_matrices = []
for (model, dictionary), nonzero_limit in tqdm(
list(product(zip(models, dictionaries), nonzero_limits)), desc="matrices"):
annoy = AnnoyIndexer(model, 1)
index = WordEmbeddingSimilarityIndex(model, kwargs={"indexer": annoy})
matrix = SparseTermSimilarityMatrix(index, dictionary, nonzero_limit=nonzero_limit)
    dense_matrices.append((matrix, dictionary, nonzero_limit))
del annoy
configurations = product(matrices + dense_matrices, corpora + [full_corpus], normalization, repetitions)
results = benchmark_results(benchmark, configurations, "matrix_speed.inner-product_results.corpus_corpus")
df = pd.DataFrame(results)
df["speed"] = df.corpus_actual_size**2 / df.duration
del df["corpus_actual_size"]
df = df.groupby(["dictionary_size", "corpus_size", "nonzero_limit", "normalized"])
def display(df):
df["duration"] = [timedelta(0, duration) for duration in df["duration"]]
df["speed"] = ["%.02f Kdoc pairs / s" % (speed / 1000) for speed in df["speed"]]
return df
display(df.mean()).loc[
[1000, 100000], :, [1, 10, 100, 1000], :].loc[
:, ["duration", "corpus_nonzero", "matrix_nonzero", "speed"]]
display(df.apply(lambda x: (x - x.mean()).std())).loc[
[1000, 100000], :, [1, 100], :].loc[
:, ["duration", "corpus_nonzero", "matrix_nonzero", "speed"]]
"""
Explanation: SCM between two corpora
Lastly, we measure the speed at which the inner_product method produces term similarities between entire corpora.
End of explanation
"""
|
vravishankar/Jupyter-Books | Closure.ipynb | mit | def print_msg(msg): # This is the outer enclosing function
def printer(): # This is the nested function
print(msg)
printer()
print_msg('Hello')
"""
Explanation: Closures
Before getting into closures lets understand nested functions. A function defined inside another function is called a nested function. Nested functions can access variables of the enclosing scope.
In Python, non-local variables are read only by default and we must declare them explicitly as non-local in order to modify them.
Please find an example below of a nested function accessing a non-local variable.
End of explanation
"""
def print_msg(msg): # This is the outer enclosing function
def printer(): # This is the nested function
print(msg)
return printer # This is changed from the above example
another = print_msg("Hello")
another()
"""
Explanation: We can see that the nested function printer() was able to access the non-local variable msg of the enclosing function.
In the example above, what would happen if the last line of the function print_msg() returned the printer() function instead of calling it? This means the function was defined as follows:
End of explanation
"""
del print_msg
another()
"""
Explanation: In the example above the print_msg() function was called with the string "Hello" and the returned function was bound to the name another. On calling another(), the message was still remembered although we have already finished executing the print_msg() function.
This technique by which some data ("Hello") get attached to the code is called closure in python.
This value in the enclosing scope is remembered even when the variable goes out of scope or the function itself is removed from the current namespace.
End of explanation
"""
def make_multiplier_of(n):
def multiplier(x):
return x * n
return multiplier
# multiplier of 3
times3 = make_multiplier_of(3)
# multiplier of 5
times5 = make_multiplier_of(5)
print(times3(9))
print(times5(3))
print(times5(times3(2)))
"""
Explanation: The criteria that must be met to create a closure in Python:
We must have a nested function (a function inside a function).
The nested function must refer to a value defined in the enclosing function.
The enclosing function must return the nested function.
Closures are useful when you want:
To avoid global variables and provide some form of data hiding.
To provide an object-oriented style of solution to the problem.
When there are few methods (one method in most cases) to be implemented in a class, closures can provide an alternate and more elegant solution (a short comparison follows below). But when the number of attributes and methods gets larger, it is better to implement a class.
End of explanation
"""
make_multiplier_of.__closure__
times3.__closure__
times3.__closure__[0].cell_contents
times5.__closure__[0].cell_contents
"""
Explanation: All function objects have a __closure__ attribute that returns a tuple of cell objects if the function is a closure.
End of explanation
"""
|
mdda/fossasia-2016_deep-learning | notebooks/work-in-progress/translation/3-parallel-texts-aggregate.ipynb | mit | import os
import csv
import time, random
import re
lang_from, lang_to = 'en', 'ko'
data_path = './data'
"""
Explanation: Aggregate Parallel Texts in directory
This assumes that we have a bunch of .csv files with the filename in the format ${source}-${lang}.csv, where each file has the header ts,txt to read in the text at each numeric timestamp.
End of explanation
"""
stub_from, stub_to = set(),set()
stub_matcher = re.compile(r"(.*)\-(\w+)\.csv")
for fname in os.listdir(data_path):
#print(fname)
m = stub_matcher.match(fname)
if m:
stub, lang = m.group(1), m.group(2)
if lang == lang_from: stub_from.add(stub)
if lang == lang_to: stub_to.add(stub)
stub_both = stub_from.intersection(stub_to)
"""
Explanation: Go through all the files in the directory, and find the source prefixes that have both lang_from and lang_to CSVs available.
End of explanation
"""
correspondence_loc,txt_from,txt_to=[],[],[]
def read_dict_from_csv(fname):
d=dict()
with open(fname, 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
d[float(row['ts'])]=row['txt']
return d
for stub in stub_both:
#print("Reading stub %s" % (stub,))
data_from = read_dict_from_csv( os.path.join(data_path, stub+'-'+lang_from+'.csv') )
data_to = read_dict_from_csv( os.path.join(data_path, stub+'-'+lang_to+'.csv') )
valid, skipped=0, 0
for ts, txt in data_from.items():
if ts in data_to:
correspondence_loc.append( (stub, ts) )
txt_from.append( txt )
txt_to.append( data_to[ts] )
valid += 1
else:
skipped += 1
print("%3d valid of %3d fragments from '%s'" % (valid, valid+skipped, stub))
print(" Total data : %d text fragments" % (len(correspondence_loc),))
for _ in range(10):
i = random.randrange(len(correspondence_loc))
print( txt_from[i], txt_to[i] )
"""
Explanation: Now, go through stub_both and, for each stub, read in the CSVs for both languages, take the txt entries that share a timestamp, and build the correspondence.
End of explanation
"""
sub_punctuation = re.compile(r'[\,\.\:\;\?\!\-\—\s\"0-9\(\)]+')
sub_apostrophes = re.compile(r'\'(\w+)')
sub_multispaces = re.compile(r'\s\s+')
if lang_from=='ja' or lang_to=='ja':
import tinysegmenter
ja_segmenter = tinysegmenter.TinySegmenter()
sub_punc_ja = re.compile(r'[\」\「\?\。\、\・\(\)\―]+')
def tokenize_txt(arr, lang):
tok=[]
for txt in arr:
t = txt.lower()
t = re.sub(sub_punctuation, u' ', t)
if "'" in t:
t = re.sub(sub_apostrophes, r" '\1", t)
if lang=='ja':
t = ' '.join( ja_segmenter.tokenize(t) )
t = re.sub(sub_punc_ja, u' ', t)
t = re.sub(sub_multispaces, ' ', t)
tok.append(t.strip())
return tok
tok_from = tokenize_txt(txt_from, lang_from)
tok_to = tokenize_txt(txt_to, lang_to)
tok_from[220:250]
tok_to[220:250]
"""
Explanation: Tokenize the correspondences
NB: Japanese requires word-splitting too
End of explanation
"""
def build_freq(tok_arr):
f=dict()
for tok in tok_arr:
for w in tok.split():
if w not in f: f[w]=0
f[w]+=1
return f
freq_from=build_freq(tok_from)
freq_to =build_freq(tok_to)
len(freq_from),len(freq_to),
def most_frequent(freq, n=50, start=0):
return ', '.join( sorted(freq,key=lambda w:freq[w], reverse=True)[start:n+start] )
print(most_frequent(freq_from))
print(most_frequent(freq_to, n=100))
print(most_frequent(freq_from, n=20, start=9000))
print( len( [_ for w,f in freq_from.items() if f>=10]))
print( len( [_ for w,f in freq_to.items() if f>=10]))
def build_rank(freq):
return { w:i for i,w in enumerate( sorted(freq, key=lambda w:freq[w], reverse=True) ) }
rank_from = build_rank(freq_from)
rank_to = build_rank(freq_to)
print(rank_from['robot'])
def max_rank(tok, rank): # Find the most infrequent word in this tokenized sentence
r = -1
for w in tok.split():
if rank[w]>r: r=rank[w]
return r
tok_max_rank_from = [ max_rank(tok, rank_from) for tok in tok_from ]
tok_max_rank_to = [ max_rank(tok, rank_to) for tok in tok_to ]
start=0;print(tok_max_rank_from[start:start+15], '\n', tok_max_rank_to[start:start+15],)
i=0; tok_max_rank_from[i], tok_from[i], tok_to[i], tok_max_rank_to[i],
"""
Explanation: Build frequency dictionaries
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/awi/cmip6/models/sandbox-2/atmos.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'awi', 'sandbox-2', 'atmos')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmos
MIP Era: CMIP6
Institute: AWI
Source ID: SANDBOX-2
Topic: Atmos
Sub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos.
Properties: 156 (127 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:37
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Overview
2. Key Properties --> Resolution
3. Key Properties --> Timestepping
4. Key Properties --> Orography
5. Grid --> Discretisation
6. Grid --> Discretisation --> Horizontal
7. Grid --> Discretisation --> Vertical
8. Dynamical Core
9. Dynamical Core --> Top Boundary
10. Dynamical Core --> Lateral Boundary
11. Dynamical Core --> Diffusion Horizontal
12. Dynamical Core --> Advection Tracers
13. Dynamical Core --> Advection Momentum
14. Radiation
15. Radiation --> Shortwave Radiation
16. Radiation --> Shortwave GHG
17. Radiation --> Shortwave Cloud Ice
18. Radiation --> Shortwave Cloud Liquid
19. Radiation --> Shortwave Cloud Inhomogeneity
20. Radiation --> Shortwave Aerosols
21. Radiation --> Shortwave Gases
22. Radiation --> Longwave Radiation
23. Radiation --> Longwave GHG
24. Radiation --> Longwave Cloud Ice
25. Radiation --> Longwave Cloud Liquid
26. Radiation --> Longwave Cloud Inhomogeneity
27. Radiation --> Longwave Aerosols
28. Radiation --> Longwave Gases
29. Turbulence Convection
30. Turbulence Convection --> Boundary Layer Turbulence
31. Turbulence Convection --> Deep Convection
32. Turbulence Convection --> Shallow Convection
33. Microphysics Precipitation
34. Microphysics Precipitation --> Large Scale Precipitation
35. Microphysics Precipitation --> Large Scale Cloud Microphysics
36. Cloud Scheme
37. Cloud Scheme --> Optical Cloud Properties
38. Cloud Scheme --> Sub Grid Scale Water Distribution
39. Cloud Scheme --> Sub Grid Scale Ice Distribution
40. Observation Simulation
41. Observation Simulation --> Isscp Attributes
42. Observation Simulation --> Cosp Attributes
43. Observation Simulation --> Radar Inputs
44. Observation Simulation --> Lidar Inputs
45. Gravity Waves
46. Gravity Waves --> Orographic Gravity Waves
47. Gravity Waves --> Non Orographic Gravity Waves
48. Solar
49. Solar --> Solar Pathways
50. Solar --> Solar Constant
51. Solar --> Orbital Parameters
52. Solar --> Insolation Ozone
53. Volcanos
54. Volcanos --> Volcanoes Treatment
1. Key Properties --> Overview
Top level key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of atmospheric model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Resolution
Characteristics of the model resolution
2.1. Horizontal Resolution Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.4. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on the computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 2.5. High Top
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping
Characteristics of the atmosphere model time stepping
3.1. Timestep Dynamics
Is Required: TRUE Type: STRING Cardinality: 1.1
Timestep for the dynamics, e.g. 30 min.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep Shortwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the shortwave radiative transfer, e.g. 1.5 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestep Longwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the longwave radiative transfer, e.g. 3 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Orography
Characteristics of the model orography
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the orography.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
"""
Explanation: 4.2. Changes
Is Required: TRUE Type: ENUM Cardinality: 1.N
If the orography type is modified, describe the time adaptation changes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid --> Discretisation
Atmosphere grid discretisation
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of grid discretisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Discretisation --> Horizontal
Atmosphere discretisation in the horizontal
6.1. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
"""
Explanation: 6.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.3. Scheme Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. Horizontal Pole
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal discretisation pole singularity treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.5. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
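# Hypothetical example (illustrative only), using one of the valid choices above:
#     DOC.set_value("hybrid sigma-pressure")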
"""
Explanation: 7. Grid --> Discretisation --> Vertical
Atmosphere discretisation in the vertical
7.1. Coordinate Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type of vertical coordinate system
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Dynamical Core
Characteristics of the dynamical core
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere dynamical core
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the dynamical core of the model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
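# Hypothetical example (illustrative only), using one of the valid choices above:
#     DOC.set_value("semi-implicit")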
"""
Explanation: 8.3. Timestepping Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Timestepping framework type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
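# Hypothetical example for a 1.N property (illustrative only). Assuming, as the
# plural "value(s)" suggests, that DOC.set_value is called once per selection:
#     DOC.set_value("surface pressure")
#     DOC.set_value("wind components")
#     DOC.set_value("temperature")
#     DOC.set_value("water vapour")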
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of the model prognostic variables
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Dynamical Core --> Top Boundary
Type of boundary layer at the top of the model
9.1. Top Boundary Condition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Top boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Top Heat
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary heat treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Top Wind
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary wind treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Dynamical Core --> Lateral Boundary
Type of lateral boundary condition (if the model is a regional model)
10.1. Condition
Is Required: FALSE Type: ENUM Cardinality: 0.1
Type of lateral boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Dynamical Core --> Diffusion Horizontal
Horizontal diffusion scheme
11.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Horizontal diffusion scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal diffusion scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Dynamical Core --> Advection Tracers
Tracer advection scheme
12.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Tracer advection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.3. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.4. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracer advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamical Core --> Advection Momentum
Momentum advection scheme
13.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Momentum advection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Scheme Staggering Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiation
Characteristics of the atmosphere radiation process
14.1. Aerosols
Is Required: TRUE Type: ENUM Cardinality: 1.N
Aerosols whose radiative effect is taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Radiation --> Shortwave Radiation
Properties of the shortwave radiation scheme
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of shortwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shortwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shortwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Shortwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiation --> Shortwave GHG
Representation of greenhouse gases in the shortwave radiation scheme
16.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiation --> Shortwave Cloud Ice
Shortwave radiative properties of ice crystals in clouds
17.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiation --> Shortwave Cloud Liquid
Shortwave radiative properties of liquid droplets in clouds
18.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
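# Hypothetical example (illustrative only), using one of the valid choices above:
#     DOC.set_value("Monte Carlo Independent Column Approximation")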
"""
Explanation: 19. Radiation --> Shortwave Cloud Inhomogeneity
Cloud inhomogeneity in the shortwave radiation scheme
19.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiation --> Shortwave Aerosols
Shortwave radiative properties of aerosols
20.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiation --> Shortwave Gases
Shortwave radiative properties of gases
21.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Radiation --> Longwave Radiation
Properties of the longwave radiation scheme
22.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of longwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the longwave radiation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Longwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Longwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 22.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Longwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiation --> Longwave GHG
Representation of greenhouse gases in the longwave radiation scheme
23.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiation --> Longwave Cloud Ice
Longwave radiative properties of ice crystals in clouds
24.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiation --> Longwave Cloud Liquid
Longwave radiative properties of liquid droplets in clouds
25.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiation --> Longwave Cloud Inhomogeneity
Cloud inhomogeneity in the longwave radiation scheme
26.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiation --> Longwave Aerosols
Longwave radiative properties of aerosols
27.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiation --> Longwave Gases
Longwave radiative properties of gases
28.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
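# Hypothetical example of a free-text overview (illustrative wording only,
# not a description of any real model):
#     DOC.set_value("Boundary layer turbulence, shallow and deep convection are "
#                   "parameterised with a unified mass-flux framework.")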
"""
Explanation: 29. Turbulence Convection
Atmosphere Convective Turbulence and Clouds
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere convection and turbulence
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Turbulence Convection --> Boundary Layer Turbulence
Properties of the boundary layer turbulence scheme
30.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Boundary layer turbulence scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Boundary layer turbulence scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Closure Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Boundary layer turbulence scheme closure order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Counter Gradient
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Uses boundary layer turbulence scheme counter gradient
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31. Turbulence Convection --> Deep Convection
Properties of the deep convection scheme
31.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Deep convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of deep convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeors and water vapour from updrafts
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Turbulence Convection --> Shallow Convection
Properties of the shallow convection scheme
32.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Shallow convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shallow convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
"""
Explanation: 32.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shallow convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33. Microphysics Precipitation
Large Scale Cloud Microphysics and Precipitation
33.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of large scale cloud microphysics and precipitation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Microphysics Precipitation --> Large Scale Precipitation
Properties of the large scale precipitation scheme
34.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the large scale precipitation parameterisation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34.2. Hydrometeors
Is Required: TRUE Type: ENUM Cardinality: 1.N
Precipitating hydrometeors taken into account in the large scale precipitation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Properties of the large scale cloud microphysics scheme
35.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the microphysics parameterisation scheme used for large scale clouds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Large scale cloud microphysics processes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Cloud Scheme
Characteristics of the cloud scheme
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the atmosphere cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
"""
Explanation: 36.3. Atmos Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Atmosphere components that are linked to the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.4. Uses Separate Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Different cloud schemes for the different types of clouds (convective, stratiform and boundary layer)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.6. Prognostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a prognostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.7. Diagnostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a diagnostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.8. Prognostic Variables
Is Required: FALSE Type: ENUM Cardinality: 0.N
List the prognostic variables used by the cloud scheme, if applicable.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 37. Cloud Scheme --> Optical Cloud Properties
Optical cloud properties
37.1. Cloud Overlap Method
Is Required: FALSE Type: ENUM Cardinality: 0.1
Method for taking into account overlapping of cloud layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Cloud Inhomogeneity
Is Required: FALSE Type: STRING Cardinality: 0.1
Method for taking into account cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Sub-grid scale water distribution
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale water distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 38.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale water distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 38.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale water distribution function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 38.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale water distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Sub-grid scale ice distribution
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale ice distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 39.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale ice distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 39.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale ice distribution function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 39.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale ice distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40. Observation Simulation
Characteristics of observation simulation
40.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of observation simulator characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Observation Simulation --> Isscp Attributes
ISSCP Characteristics
41.1. Top Height Estimation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator ISSCP top height estimation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. Top Height Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator ISSCP top height direction
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 42. Observation Simulation --> Cosp Attributes
CFMIP Observational Simulator Package attributes
42.1. Run Configuration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator COSP run configuration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.2. Number Of Grid Points
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of grid points
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.3. Number Of Sub Columns
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of sub-columns used to simulate sub-grid variability
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.4. Number Of Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of levels
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
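# Hypothetical example (illustrative only): a 94 GHz cloud radar expressed in Hz,
# assuming the frequency is given as a plain float:
#     DOC.set_value(94.0e9)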
"""
Explanation: 43. Observation Simulation --> Radar Inputs
Characteristics of the cloud radar simulator
43.1. Frequency
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Cloud simulator radar frequency (Hz)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 43.2. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator radar type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.3. Gas Absorption
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses gas absorption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.4. Effective Radius
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses effective radius
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44. Observation Simulation --> Lidar Inputs
Characteristics of the cloud lidar simulator
44.1. Ice Types
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator lidar ice type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44.2. Overlap
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator lidar overlap
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 45. Gravity Waves
Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.
45.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of gravity wave parameterisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.2. Sponge Layer
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sponge layer in the upper levels in order to avoid gravity wave reflection at the top.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.3. Background
Is Required: TRUE Type: ENUM Cardinality: 1.1
Background wave distribution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.4. Subgrid Scale Orography
Is Required: TRUE Type: ENUM Cardinality: 1.N
Subgrid scale orography effects taken into account.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 46. Gravity Waves --> Orographic Gravity Waves
Gravity waves generated due to the presence of orography
46.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 47. Gravity Waves --> Non Orographic Gravity Waves
Gravity waves generated by non-orographic processes.
47.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the non-orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
"""
Explanation: 47.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 48. Solar
Top of atmosphere solar insolation characteristics
48.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of solar insolation of the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 49. Solar --> Solar Pathways
Pathways for solar forcing of the atmosphere
49.1. Pathways
Is Required: TRUE Type: ENUM Cardinality: 1.N
Pathways for the solar forcing of the atmosphere model domain
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 50. Solar --> Solar Constant
Solar constant and top of atmosphere insolation characteristics
50.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the solar constant.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 50.2. Fixed Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If the solar constant is fixed, enter the value of the solar constant (W m-2).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 50.3. Transient Characteristics
Is Required: TRUE Type: STRING Cardinality: 1.1
Solar constant transient characteristics (W m-2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 51. Solar --> Orbital Parameters
Orbital parameters and top of atmosphere insolation characteristics
51.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 51.2. Fixed Reference Date
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Reference date for fixed orbital parameters (yyyy)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 51.3. Transient Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Description of transient orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 51.4. Computation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used for computing orbital parameters.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 52. Solar --> Insolation Ozone
Impact of solar insolation on stratospheric ozone
52.1. Solar Ozone Impact
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does top of atmosphere insolation impact on stratospheric ozone?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 53. Volcanos
Characteristics of the implementation of volcanoes
53.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the implementation of volcanic effects in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 54. Volcanos --> Volcanoes Treatment
Treatment of volcanoes in the atmosphere
54.1. Volcanoes Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How volcanic effects are modeled in the atmosphere.
End of explanation
"""
|
ckmah/pokemon-tutorial | Welcome to Python for Data Science.ipynb | mit | print('This is a cell.')
"""
Explanation: Welcome to Python for Data Science
This is a beginner/intermediate-level Python tutorial covering some of the most popular Python packages in data science and scientific analysis.
This notebook was prepared by Clarence Mah. Source and license info is on GitHub.
Adapted from Andrew Gele's and Sang Han's notebooks.
Table of Contents
Jupyter Notebooks
Explore Data with Packages
Data Visualization
<a id='section1'></a>
Jupyter Notebooks
Jupyter notebooks are a medium for creating documents that contain executable code, interactive visualizations, and rich text.
Run the cell below by pressing Ctrl+Enter, or Shift+Enter.
End of explanation
"""
numbers = [1,2,3,4]
# Look at variable
numbers
"""
Explanation: Python runs just as you would expect it to with variables, loops, etc.
End of explanation
"""
range(
"""
Explanation: One of the most valuable tools is tab completion. Place the cursor after range( in the following cell, then try pressing Shift+Tab a couple times slowly, noting how more and more information expands each time.
End of explanation
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Render plots inline
%matplotlib inline
"""
Explanation: Feel free to experiment with commands and buttons in the toolbar at the top of the notebook. Use notebook shortcuts by exiting Edit Mode (press Esc) and entering Command mode. To get a list of shortcuts, press keyboard shortcut h.
<a id='section2'></a>
<hr>
Explore Data with Packages
The open-source ecosystem around Python enables people to share libraries of code as packages. These are some of the most widely used in data science and scientific computing, including the field of bioinformatics.
numpy - [cheatsheet] A library for manipulating multi-dimensional data, linear algebra, and random number functions.
pandas - [cheatsheet] A library for performing data analysis with "relational" or "labeled" data.
matplotlib - [cheatsheet] A library for plotting figures.
seaborn - [docs] A library based on matplotlib that simplifies plotting with improved aesthetics.
Here, we import the libraries we want to use in this notebook.
End of explanation
"""
# Python list
a_list = [3,2,1]
# Create an array from the list
a = np.array(a_list)
# Create a constant array of 2s, length 3
b = np.full(3, 2, dtype='int')
# Print both arrays (in a notebook only the last bare expression is echoed,
# so use print() to display both)
print(a)
print(b)
np.sort(a)
"""
Explanation: numpy
Numpy arrays can be created several different ways. Python lists can be converted directly, while numpy also includes some functions for generating simple arrays.
End of explanation
"""
a + b
np.add(a, b)
"""
Explanation: Arrays behave like vectors, where native operators will perform element-wise operations. numpy also includes equivalent functions.
End of explanation
"""
# TODO: Try subtract, multiply and divide on array `a`
"""
Explanation: Try subtract(), multiply(), and divide() with arrays a and b, and compare the results with those of the native operators -, *, and /.
End of explanation
"""
# TODO: Try mean, median, and std on array `a`
"""
Explanation: numpy also includes handy functions like np.mean(), np.median(), np.std(), etc.
End of explanation
"""
# TODO: Use the appropriate read function from pandas.
# Hint: the data file is a .csv file
path_to_file = 'data/Pokemon.csv'
pokemon = pd.read
"""
Explanation: There are many more useful functions that will prove useful depending on the dimensionality of your data and your specific analysis.
<a id='section2.2'></a>
pandas
We will be taking a look at the Pokemon Stats dataset.
Try tab completing pd.read to see pandas' read functions. Load the pokemon dataset using the appropriate one.
Note: Use the provided path_to_file variable as the path to the data.
End of explanation
"""
# 800 rows, 13 columns
pokemon.shape
"""
Explanation: This data structure that pandas stores the data in is called a DataFrame. Think of it like a matrix, but more flexible.
End of explanation
"""
pokemon.head()
"""
Explanation: That might be a lot to view all at once. Preview the first 5 rows of our data with header and row names.
End of explanation
"""
pokemon.describe()
"""
Explanation: describe() is a great function for a quick statistical summary of the numerical variables in your dataset.
End of explanation
"""
# Select the `Name` column
pokemon['Name']
# OR
pokemon.Name
# Select the first 5 rows
pokemon[:5]
"""
Explanation: We can easily select columns by name and rows by index.
End of explanation
"""
# Returns True of False for each row (pokemon)
pokemon.Attack > 75
# Selects pokemon with > 75 attack
pokemon[pokemon.Attack > 75]
"""
Explanation: A practical use for this would be to select rows and columns conditionally. Here we only want to look at Pokemon with an Attack stat greater than 75.
End of explanation
"""
# TODO: Select Generation 1 pokemon
"""
Explanation: How would you select all the Generation 1 Pokemon?
End of explanation
"""
# TODO: Use the statement you generated from the previous code cell
pokemon = ?
"""
Explanation: <a id='section3'></a>
<hr>
Data Visualization
We're only going to look at first generation Pokemon for the sake of simplicity.
End of explanation
"""
sns.boxplot(pokemon.HP);
"""
Explanation: matplotlib + seaborn
Graphs are a great way to explore data statistics. We can make a boxplot of single variable (column) using seaborn.
End of explanation
"""
sns.boxplot(data=pokemon);
"""
Explanation: Even better, seaborn can automatically make a boxplot for each variable out of the box.
End of explanation
"""
pokemon = pokemon.drop(['#', 'Total', 'Legendary'],axis='columns')
"""
Explanation: Since we only want to look at their stats, some variables are irrelevant. Let's exclude those for now.
Note: If you are confused about a function or there are too many parameters to keep track of, remember to use tab completion for help. Put your cursor after pokemon.drop( and try it out.
End of explanation
"""
# TODO: Drop Generation column here
"""
Explanation: Oops! There's one more variable that we forgot to drop. Let's drop the Generation column since we only have Generation 1 in our DataFrame.
Don't forget to reassign the result to pokemon or the dropped column won't be saved.
End of explanation
"""
pokemon.head()
"""
Explanation: Notice that we now only have relevant columns that are considered a Stat or Pokemon Type.
End of explanation
"""
sns.boxplot(data=pokemon);
"""
Explanation: Great! Let's plot it again with only Stats.
End of explanation
"""
# Transform data for swarmplot
normalized = pd.melt(
pokemon, id_vars=["Name", "Type 1", "Type 2"], var_name="Stat")
normalized.head()
sns.swarmplot(data=normalized, x='Stat', y='value', hue='Type 1');
"""
Explanation: Fancier Plots
We can compare Pokemon stats by type. This particular plot, a swarmplot, requires the data to be transformed into a certain "long" format first. Check out pandas' melt for a description of the transformation.
End of explanation
"""
# Make the plot larger
plt.figure(figsize=(12, 10))
# Adjust the y-axis
plt.ylim(0, 275)
# Organize by type [split], make points larger [size]
sns.swarmplot(
data=normalized, x='Stat', y='value', hue='Type 1', split=True, size=7);
# Move legend out of the way
plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.);
"""
Explanation: That looks neat, but seems to be a little cluttered. Using a few Seaborn and Matplotlib functions, we can adjust how our plot looks.
On each line below, we will:
- Make the plot larger
- Adjust the y-axis
- Organize the point distribution by type and make the individual points larger
- Move the legend out of the way
End of explanation
"""
# Set background to white
sns.set_style('whitegrid')
# Make the plot larger
plt.figure(figsize=(12, 10))
# Adjust the y-axis
plt.ylim(0, 275)
# Organize by type [split], make points larger [size]
sns.swarmplot(
data=normalized,
x='Stat',
y='value',
hue='Type 1',
split=True,
size=7,
palette='Set3');
# Move legend out of the way
plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.);
"""
Explanation: Make it easier to see types...
End of explanation
"""
# Compare stats by type
figure = sns.factorplot(
data=normalized,
x='Type 1',
y='value',
col='Stat',
col_wrap=2,
aspect=2,
kind='box',
palette='Set3');
# Rotate x-axis tick labels
figure.set_xticklabels(rotation=30);
"""
Explanation: Bonus plot!
Compare Stats by types.
End of explanation
"""
|
turbomanage/training-data-analyst | courses/machine_learning/deepdive2/launching_into_ml/solutions/first_model.ipynb | apache-2.0 | import os
"""
Explanation: First BigQuery ML models for Taxifare Prediction
In this notebook, we will use BigQuery ML to build our first models for taxifare prediction. BigQuery ML provides a fast way to build ML models on large structured and semi-structured datasets.
Learning Objectives
Choose the correct BigQuery ML model type and specify options
Evaluate the performance of your ML model
Improve model performance through data quality cleanup
Create a Deep Neural Network (DNN) using SQL
Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook.
We'll start by creating a dataset to hold all the models we create in BigQuery
Import libraries
End of explanation
"""
%%bash
export PROJECT=$(gcloud config list project --format "value(core.project)")
echo "Your current GCP Project Name is: "$PROJECT
PROJECT = "your-gcp-project-here" # REPLACE WITH YOUR PROJECT NAME
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# Do not change these
os.environ["BUCKET"] = PROJECT # DEFAULT BUCKET WILL BE PROJECT ID
os.environ["REGION"] = REGION
if PROJECT == "your-gcp-project-here":
print("Don't forget to update your PROJECT name! Currently:", PROJECT)
"""
Explanation: Set environment variables
End of explanation
"""
%%bash
## Create a BigQuery dataset for serverlessml if it doesn't exist
datasetexists=$(bq ls -d | grep -w serverlessml)
if [ -n "$datasetexists" ]; then
echo -e "BigQuery dataset already exists, let's not recreate it."
else
echo "Creating BigQuery dataset titled: serverlessml"
bq --location=US mk --dataset \
--description 'Taxi Fare' \
$PROJECT:serverlessml
echo "\nHere are your current datasets:"
bq ls
fi
## Create GCS bucket if it doesn't exist already...
exists=$(gsutil ls -d | grep -w gs://${PROJECT}/)
if [ -n "$exists" ]; then
echo -e "Bucket exists, let's not recreate it."
else
echo "Creating a new GCS bucket."
gsutil mb -l ${REGION} gs://${PROJECT}
echo "\nHere are your current buckets:"
gsutil ls
fi
"""
Explanation: Create a BigQuery Dataset and Google Cloud Storage Bucket
A BigQuery dataset is a container for tables, views, and models built with BigQuery ML. Let's create one called serverlessml if we have not already done so in an earlier lab. We'll do the same for a GCS bucket for our project too.
End of explanation
"""
%%bigquery
CREATE OR REPLACE MODEL
serverlessml.model1_rawdata
OPTIONS(input_label_cols=['fare_amount'],
model_type='linear_reg') AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count * 1.0 AS passengers
FROM
`nyc-tlc.yellow.trips`
WHERE
ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1
"""
Explanation: Model 1: Raw data
Let's build a model using just the raw data. It's not going to be very good, but sometimes it is good to actually experience this.
The model will take a minute or so to train. When it comes to ML, this is blazing fast.
End of explanation
"""
%%bigquery
SELECT * FROM ML.EVALUATE(MODEL serverlessml.model1_rawdata)
"""
Explanation: Once the training is done, visit the BigQuery Cloud Console and look at the model that has been trained. Then, come back to this notebook.
Note that BigQuery automatically split the data we gave it, trained on only part of it, and used the rest for evaluation. We can look at evaluation statistics on that held-out data:
End of explanation
"""
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL serverlessml.model1_rawdata)
"""
Explanation: Let's report just the error we care about, the Root Mean Squared Error (RMSE)
End of explanation
"""
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL serverlessml.model1_rawdata, (
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count * 1.0 AS passengers
FROM
`nyc-tlc.yellow.trips`
WHERE
ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 2
AND trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
))
"""
Explanation: We told you it was not going to be good! Recall that our heuristic got 8.13, and our target is $6.
Note that the error is going to depend on the dataset that we evaluate it on.
We can also evaluate the model on our own held-out benchmark/test dataset, but we shouldn't make a habit of this: we want to keep our benchmark dataset for the final evaluation, not make decisions with it along the way, otherwise our test dataset won't be truly independent.
End of explanation
"""
%%bigquery
CREATE OR REPLACE TABLE
serverlessml.cleaned_training_data AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers
FROM
`nyc-tlc.yellow.trips`
WHERE
ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1
AND trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
%%bigquery
-- LIMIT 0 is a free query; this allows us to check that the table exists.
SELECT * FROM serverlessml.cleaned_training_data
LIMIT 0
%%bigquery
CREATE OR REPLACE MODEL
serverlessml.model2_cleanup
OPTIONS(input_label_cols=['fare_amount'],
model_type='linear_reg') AS
SELECT
*
FROM
serverlessml.cleaned_training_data
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL serverlessml.model2_cleanup)
"""
Explanation: Model 2: Apply data cleanup
Recall that we did some data cleanup in the previous lab. Let's apply those same cleanup rules before training.
This is a dataset that we will need quite frequently in this notebook, so let's extract it first.
End of explanation
"""
%%bigquery
-- This model type is in alpha, so it may not work for you yet.
-- This training takes on the order of 15 minutes.
CREATE OR REPLACE MODEL
serverlessml.model3b_dnn
OPTIONS(input_label_cols=['fare_amount'],
model_type='dnn_regressor', hidden_units=[32, 8]) AS
SELECT
*
FROM
serverlessml.cleaned_training_data
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL serverlessml.model3b_dnn)
"""
Explanation: Model 3: More sophisticated models
What if we try a more sophisticated model? Let's try Deep Neural Networks (DNNs) in BigQuery:
DNN
To create a DNN, simply specify dnn_regressor for the model_type and add your hidden layers.
End of explanation
"""
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL serverlessml.model3b_dnn, (
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count * 1.0 AS passengers,
'unused' AS key
FROM
`nyc-tlc.yellow.trips`
WHERE
ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 10000)) = 2
AND trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
))
"""
Explanation: Nice!
Evaluate DNN on benchmark dataset
Let's use the same validation dataset to evaluate -- remember that evaluation metrics depend on the dataset. You can not compare two models unless you have run them on the same withheld data.
End of explanation
"""
|
NikitaLoik/machineLearning_andrewNg | notebooks/8_anomaly_detection_recomender_system.ipynb | mit | anomalyfile_path = '../course_materials/ex8data1.mat'
# Imports needed by this notebook (data loading, numerics, plotting, stats, optimisation)
import numpy as np
import scipy as sp
import scipy.optimize
import matplotlib.pyplot as plt
from scipy.io import loadmat
from scipy.stats import multivariate_normal

anomalyData = loadmat(anomalyfile_path)
print(anomalyData.keys())
print(anomalyData['X'].shape)
print(anomalyData['Xval'].shape)
print(anomalyData['yval'].shape)
anomalyX = anomalyData['X']
plt.plot(anomalyX[:,:1], anomalyX[:,1:], 'o')
plt.axis('equal')
plt.show()
"""
Explanation: 1 Anomaly Detection
End of explanation
"""
def gaussianParameter(X):
return np.mean(X, axis=0), np.cov(X.T)
def gaussianDistribution(X):
    # Fit a multivariate Gaussian to X and return its density at each row of X
    # (implements the formula given in the explanation below).
    mu, Sigma = np.mean(X, axis=0), np.cov(X.T)
    detSigma = np.linalg.det(Sigma)
    diff = X - mu
    exponent = -0.5 * np.sum(np.dot(diff, np.linalg.inv(Sigma)) * diff, axis=1)
    return np.exp(exponent) / np.sqrt((2 * np.pi) ** X.shape[1] * detSigma)
mu, Sigma = gaussianParameter(anomalyX)
print(anomalyX.shape)
print(mu.shape)
print(Sigma.shape)
# def plotMultGaussian():
plt.plot(anomalyX[:,:1], anomalyX[:,1:], 'o', markersize=3, alpha=.3)
minX = 0,30
maxX = 0,30
XX, YY = np.mgrid[0:30:.5, 0:30:.5]
XY = np.empty(XX.shape + (2,))
XY[:, :, 0] = XX
XY[:, :, 1] = YY
multivariateGaussian = multivariate_normal(mu, Sigma)
plt.contour(XX, YY, multivariateGaussian.pdf(XY), [10 ** a for a in range(-22,0,4)])
plt.axis('equal')
plt.show()
σ = np.diag(np.var(anomalyX, axis=0))
plt.plot(anomalyX[:,:1], anomalyX[:,1:], 'o', markersize=3, alpha=.3)
minX = 0,30
maxX = 0,30
XX, YY = np.mgrid[0:30:.5, 0:30:.5]
XY = np.empty(XX.shape + (2,))
XY[:, :, 0] = XX
XY[:, :, 1] = YY
multivariateGaussian_1 = multivariate_normal(mu, σ)
plt.contour(XX, YY, multivariateGaussian_1.pdf(XY), [10**a for a in range(-22,0,4)])
plt.axis('equal')
plt.show()
"""
Explanation: 1.1 Multivariate Gaussian Distribution
1.1.1 Univariate Gaussian Distribution
$p(x; \mu, \sigma ^2) = \frac{1}{\sqrt{2\pi}\cdot\sigma}exp\Big({-\frac{(x-\mu)^2}{2\sigma^2}}\Big)$, where<br />
$\sigma^2$ is the variance ($\sigma$ the standard deviation).<br />
1.1.2 Multivariate Gaussian Distribution
$p(x; \mu, \Sigma) = \frac{1}{{(2\pi)^{n/2}}\cdot{|\Sigma|^{1/2}}}exp\Big(-\frac{1}{2}(x-\mu)\Sigma^{-1}(x-\mu)^T\Big)$, where<br />
$\Sigma$ is a covariance matrix (size $k\times k$);<br />
$|\Sigma|$ is the determinant of a covariance matrix $\Sigma$<br />
End of explanation
"""
Xval = anomalyData['Xval']
yval = np.array(anomalyData['yval'], dtype=bool).flatten()
plt.plot(Xval[:,:1], Xval[:,1:], 'o', markersize=3, alpha=.3)
plt.show()
multivariateGaussian = multivariate_normal(mu, Sigma)
pX = multivariateGaussian.pdf(anomalyX)
pXmin = np.min(pX)
print(pXmin)
pXmax = np.max(pX)
# print(pXmax)
pXval = multivariateGaussian.pdf(Xval)
# Scan candidate thresholds and keep the one with the best F1 score on the
# validation set (working version of the previously commented-out draft).
nSteps = 1000
bestEpsilon, bestF_score = 0.0, 0.0
for epsilon in np.linspace(pXmin, pXmax, nSteps):
    anomalyPrediction = pXval <= epsilon
    tP = np.sum(yval & anomalyPrediction)
    fP = np.sum(~yval & anomalyPrediction)
    fN = np.sum(yval & ~anomalyPrediction)
    if tP == 0:
        continue
    precision = tP / (tP + fP)
    recall = tP / (tP + fN)
    F_score = 2 * precision * recall / (precision + recall)
    if F_score > bestF_score:
        bestEpsilon, bestF_score = epsilon, F_score
print(bestEpsilon, bestF_score)
"""
Explanation: 1.2 Selecting Threshold for Anomaly Detection
1.2.1 Types of Errors & Accuracy, Precision, Recal
The rate type I error (false positives) is denoted by $\alpha$.<br />
The rate type II error (false negatives) is denoted by $\beta$.<br /><br />
Accuracy $= \frac {tP + tN}{tP + tN + fP + fN}$<br /><br />
Precision $= \frac {tP}{tP + fP}$<br /><br />
Recall $= \frac {tP}{tP + fN}$<br /><br />
F $= \frac {2\cdot Precision\cdot Recall}{Precision+Recall}$
End of explanation
"""
moviefile_path = '../course_materials/ex8_movies.mat'
movieData = loadmat(moviefile_path)
print(movieData.keys())
print(movieData['Y'].shape)
print(movieData['R'].shape)
"""
Explanation: 2 Recommender System
End of explanation
"""
movieRating = movieData['Y']
movieRated = movieData['R']
n_movies, n_users = movieRating.shape
print(np.array_equal(movieRated, movieRating != 0))
(np.ones(movieRating.shape)*5)*(movieRating != 0)
print ("'Toy Story' average rating", np.mean(movieRating[0,np.nonzero(movieRating[0,])]))
fig = plt.figure(figsize=(12, 8))
plt.imshow(movieRating)
plt.colorbar()
plt.ylabel("Movies", fontsize=15)
plt.xlabel("Users", fontsize=15)
"""
Explanation: 2.1 Data Visualisation
movieRating is a movie-rating matrix (1682, 943) — number of movies by number of users.<br>
movieRated is a matrix of rated movies (1682, 943); movieRated(n,k) = 1 if user k gave a rating to movie n.<br>
NB Strictly speaking, movieRated can be produced from movieRating using a simple operation (movieRating != 0).
End of explanation
"""
movieParametersfile_path = '../course_materials/ex8_movieParams.mat'
movieParameters = loadmat(movieParametersfile_path)
print(movieParameters.keys())
X = movieParameters['X']
β = movieParameters['Theta']
n_movies = int(movieParameters['num_movies'])
n_users = int(movieParameters['num_users'])
nFeatures = int(movieParameters['num_features'])
print(n_movies, n_users, nFeatures)
print(X.shape)
print(β.shape)
"""
Explanation: 2.2 Collaborative-Filtering Learning Algorithm
End of explanation
"""
def flatten_βX(β, X):
return np.concatenate((β.flatten(), X.flatten()))
def reshape_βX(βX, n_movies, n_users):
splitβX = np.split(βX, [int(n_users*βX.size/(n_movies+n_users))])
return(splitβX[0].reshape(n_users,-1), splitβX[1].reshape(n_movies,-1))
def get_hypothesis(β, X, Y):
# for X(n,j), β(k,j), Y(n,k)
# hypothesis matrix h(n,k)= X(n,j)*β.T(j,k)
# to avoid bias, caused by including estimates for the movies, which have not been rated
return np.multiply(np.dot(X,β.T), Y!=0)
def cost_function(βX, Y, n_movies, n_users, λ=.0):
β, X = reshape_βX(βX, n_movies, n_users)
# hypothesis matrix h(n,k)= X(n,j)*β.T(j,k)
H = get_hypothesis(β, X, Y)
# cost scalar J
J = np.sum(np.square(H-Y))/2
# regularisation term
R = λ * (np.sum(np.square(β)) + np.sum(np.square(X)))/2
return J + R
"""
Explanation: 2.2.1 Collaborative-Filtering Cost Function
$J = \frac{1}{2}\sum{(X\cdot \beta^T- y)^2}$
End of explanation
"""
testUsers = 4; testMovies = 5; testFeatures = 3
testX = X[:testMovies,:testFeatures]
testβ = β[:testUsers,:testFeatures]
testY = movieRating[:testMovies,:testUsers]
testβX = flatten_βX(testβ, testX)
print(cost_function(testβX, testY, testMovies, testUsers))
print(cost_function(testβX, testY, testMovies, testUsers, λ=1.5))
"""
Explanation: 2.2.2 Test Collaborative-Filtering Cost Function
You should expect to see an output of 22.22 (Andrew Ng)
End of explanation
"""
def get_gradient(βX, Y, n_movies, n_users, λ=.0):
β, X = reshape_βX(βX, n_movies, n_users)
# hypothesis matrix h(n,k) = X(n,j)*β.T(j,k)
H = get_hypothesis(β, X, Y)
gβ = np.dot((H-Y).T, X)
gX = np.dot(H-Y, β)
# regularisation term
gβ += λ*β
gX += λ*X
return flatten_βX(gβ, gX)
def testGradient(βX, Y, n_movies, n_users, λ=.0):
print('Numerical Gradient \t Gradient \t\t Difference')
Epsilon = 0.0001
iGradient = get_gradient(βX, Y, n_movies, n_users, λ=.0)
for i in np.random.choice(βX.size, 5, replace=False):
# creat epsilon perturbation vector
epsilon = np.zeros(βX.size)
epsilon[i] = Epsilon
negDelta = cost_function(βX-epsilon, Y, n_movies, n_users, λ=.0)
posDelta = cost_function(βX+epsilon, Y, n_movies, n_users, λ=.0)
newGradient = (posDelta-negDelta)/(2*Epsilon)
print ('%0.15f \t %0.15f \t %0.15f' % (newGradient, iGradient[i],newGradient-iGradient[i]))
Y = movieRating
βX = flatten_βX(β, X)
testGradient(βX, Y, n_movies, n_users)
"""
Explanation: 2.2.3 Collaborative-Filtering Gradient Function
$\frac{\partial J}{\partial x} = \sum{(X\cdot \beta^T- y)\cdot\beta}$<br/>
$\frac{\partial J}{\partial \beta} = \sum{(X\cdot \beta^T- y)\cdot x}$
End of explanation
"""
movieList = {}
with open('../course_materials/movie_ids.txt', 'r') as movieFile:
for line in movieFile:
movieList[int(line.strip('\n').split(' ')[0])-1] = ' '.join(line.strip('\n').split(' ')[1:])
testRating = np.zeros((Y.shape[0],1))
testRating[0] = 4
testRating[97] = 2
testRating[6] = 3
testRating[11] = 5
testRating[53] = 4
testRating[63] = 5
testRating[65] = 3
testRating[68] = 5
testRating[182] = 4
testRating[225] = 5
testRating[354] = 5
testRating.shape
# Add testRating to the movieRating (Y)
Y = np.hstack((Y, testRating))
n_movies, n_users = Y.shape
print(n_movies, n_users)
Y - (Y.sum(1)/(Y != 0).sum(1)).reshape(-1,1)
def normaliseY(Y):
# To avoid bias zeros are not taken into account
meanY = Y.sum(1)/(Y != 0).sum(1)
return Y-meanY.reshape(-1,1), meanY.reshape(-1,1)
normalisedY, meanY = normaliseY(Y)
print(Y.shape)
print(meanY.shape)
# Generate random initial parameters, β and X
nFeatures = 10
X = np.random.rand(n_movies,nFeatures)
β = np.random.rand(n_users,nFeatures)
βX = flatten_βX(β, X)
# λ is set to 10 by Andrew Ng
λ = 10.
# cost_function optimisation
result = sp.optimize.fmin_cg(cost_function, x0=βX, fprime=get_gradient,
args=(Y, n_movies, n_users, λ),
maxiter=50,disp=True,full_output=True)
# Reshape the trained output into sensible "X" and "β" matrices
optβ, optX = reshape_βX(result[0], n_movies, n_users)
print(optX.shape)
print(optβ.shape)
# After training the model, now make recommendations by computing
# the predictions matrix
predictionRating = np.dot(optX, optβ.T)
print(predictionRating[:,-1].shape)
# Grab the last user's predictions (since I put my predictions at the
# end of the Y matrix, not the front)
# Add back in the mean movie ratings
testPrediction = predictionRating[:,-1] + meanY.flatten()
# print(testPrediction)
# Sort my predictions from highest to lowest
predictionIndicesSorted = np.argsort(testPrediction)
predictionIndicesSorted[:] = predictionIndicesSorted[::-1]
print ("Top recommendations for you:")
for i in range(10):
print ('Predicting rating %0.1f for movie %s.' %
(testPrediction[predictionIndicesSorted[i]],movieList[predictionIndicesSorted[i]]))
print ("\nOriginal ratings provided:")
for i in range(len(testRating)):
if testRating[i] > 0:
print ('Rated %d for movie %s.' % (testRating[i],movieList[i]))
"""
Explanation: 2.3 Learning Movie Recommendations
End of explanation
"""
|
IS-ENES-Data/submission_forms | test/forms/CORDEX/CORDEX_tt_tt1.ipynb | apache-2.0 | from dkrz_forms import form_widgets
form_widgets.show_status('form-submission')
"""
Explanation: CORDEX ESGF submission form
General Information
Data to be submitted for ESGF data publication must follow the rules outlined in the Cordex Archive Design Document <br /> (https://verc.enes.org/data/projects/documents/cordex-archive-design)
Thus file names have to follow the pattern:<br />
VariableName_Domain_GCMModelName_CMIP5ExperimentName_CMIP5EnsembleMember_RCMModelName_RCMVersionID_Frequency[_StartTime-EndTime].nc <br />
Example: tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_yyyymm-yyyymm.nc
The directory structure in which these files are stored follow the pattern:<br />
activity/product/Domain/Institution/
GCMModelName/CMIP5ExperimentName/CMIP5EnsembleMember/
RCMModelName/RCMVersionID/Frequency/VariableName <br />
Example: CORDEX/output/AFR-44/MPI-CSC/MPI-M-MPI-ESM-LR/rcp26/r1i1p1/MPI-CSC-REMO2009/v1/mon/tas/tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_yyyymm-yyyymm.nc
Notice: If your model is not yet registered, please contact [email protected]
specifying: Full institution name, Short institution name (acronym), Contact person and
e-mail, RCM Name (acronym), Terms of Use (unrestricted or non-commercial only) and the CORDEX domains in which you are interested.
At some CORDEX ESGF data centers a 'data submission form' is in use in order to improve initial information exchange between data providers and the data center. The form has to be filled in before the publication process can be started. In case you have questions please contact the individual data centers:
o at DKRZ: [email protected]
o at SMHI: [email protected]
End of explanation
"""
MY_LAST_NAME = "tt" # e.gl MY_LAST_NAME = "schulz"
#-------------------------------------------------
from dkrz_forms import form_handler, form_widgets, checks
form_info = form_widgets.check_pwd(MY_LAST_NAME)
sfg = form_handler.init_form(form_info)
sf = sfg.sub.entity_out.report
"""
Explanation: Start submission procedure
The submission is based on this interactive document consisting of "cells" you can modify and then evaluate
evaluation of cells is done by selecting the cell and then press the keys "Shift" + "Enter"
<br /> please evaluate the following cell to initialize your form
End of explanation
"""
sf.submission_type = "..." # example: sf.submission_type = "initial_version"
"""
Explanation: please provide information on the contact person for this CORDEX data submission request
Type of submission
please specify the type of this data submission:
- "initial_version" for first submission of data
- "new _version" for a re-submission of previousliy submitted data
- "retract" for the request to retract previously submitted data
End of explanation
"""
sf.institution = "..." # example: sf.institution = "Alfred Wegener Institute"
"""
Explanation: Requested general information
Please provide model and institution info as well as an example of a file name
institution
The value of this field has to equal the value of the optional NetCDF attribute 'institution'
(long version) in the data files if the latter is used.
End of explanation
"""
sf.institute_id = "..." # example: sf.institute_id = "AWI"
"""
Explanation: institute_id
The value of this field has to equal the value of the global NetCDF attribute 'institute_id'
in the data files and must equal the 4th directory level. It is needed before the publication
process is started in order that the value can be added to the relevant CORDEX list of CV1
if not yet there. Note that 'institute_id' has to be the first part of 'model_id'
End of explanation
"""
sf.model_id = "..." # example: sf.model_id = "AWI-HIRHAM5"
"""
Explanation: model_id
The value of this field has to be the value of the global NetCDF attribute 'model_id'
in the data files. It is needed before the publication process is started in order that
the value can be added to the relevant CORDEX list of CV1 if not yet there.
Note that it must be composed of the 'institute_id' followed by the RCM CORDEX model name,
separated by a dash. It is part of the file name and the directory structure.
End of explanation
"""
sf.experiment_id = "..." # example: sf.experiment_id = "evaluation"
# ["value_a","value_b"] in case of multiple experiments
sf.time_period = "..." # example: sf.time_period = "197901-201412"
# ["time_period_a","time_period_b"] in case of multiple values
"""
Explanation: experiment_id and time_period
Experiment has to equal the value of the global NetCDF attribute 'experiment_id'
in the data files. Time_period gives the period of data for which the publication
request is submitted. If you intend to submit data from multiple experiments you may
add one line for each additional experiment or send in additional publication request sheets.
End of explanation
"""
sf.example_file_name = "..." # example: sf.example_file_name = "tas_AFR-44_MPI-M-MPI-ESM-LR_rcp26_r1i1p1_MPI-CSC-REMO2009_v1_mon_yyyymm-yyyymm.nc"
# Please run this cell as it is to check your example file name structure
# to_do: implement submission_form_check_file function - output result (attributes + check_result)
form_handler.cordex_file_info(sf,sf.example_file_name)
"""
Explanation: Example file name
Please provide an example file name of a file in your data collection,
this name will be used to derive the other
End of explanation
"""
sf.grid_mapping_name = "..." # example: sf.grid_mapping_name = "rotated_latitude_longitude"
"""
Explanation: information on the grid_mapping
the NetCDF/CF name of the data grid ('rotated_latitude_longitude', 'lambert_conformal_conic', etc.),
i.e. either that of the native model grid, or 'latitude_longitude' for the regular -XXi grids
End of explanation
"""
sf.grid_as_specified_if_rotated_pole = "..." # example: sf.grid_as_specified_if_rotated_pole = "yes"
"""
Explanation: Does the grid configuration exactly follow the specifications in ADD2 (Table 1)
in case the native grid is 'rotated_pole'? If not, comment on the differences; otherwise write 'yes' or 'N/A'. If the data is not delivered on the computational grid it has to be noted here as well.
End of explanation
"""
sf.data_qc_status = "..." # example: sf.data_qc_status = "QC2-CORDEX"
sf.data_qc_comment = "..." # any comment of quality status of the files
"""
Explanation: Please provide information on quality check performed on the data you plan to submit
Please answer 'no', 'QC1', 'QC2-all', 'QC2-CORDEX', or 'other'.
'QC1' refers to the compliancy checker that can be downloaded at http://cordex.dmi.dk.
'QC2' refers to the quality checker developed at DKRZ.
If your answer is 'other' give some informations.
End of explanation
"""
sf.terms_of_use = "..." # example: sf.terms_of_use = "unrestricted"
"""
Explanation: Terms of use
Please give the terms of use that shall be asigned to the data.
The options are 'unrestricted' and 'non-commercial only'.
For the full text 'Terms of Use' of CORDEX data refer to
http://cordex.dmi.dk/joomla/images/CORDEX/cordex_terms_of_use.pdf
End of explanation
"""
sf.directory_structure = "..." # example: sf.directory_structure = "compliant"
"""
Explanation: Information on directory structure and data access path
(and other information needed for data transport and data publication)
If there is any directory structure deviation from the CORDEX standard please specify here.
Otherwise enter 'compliant'. Please note that deviations MAY imply that data can not be accepted.
End of explanation
"""
sf.data_path = "..." # example: sf.data_path = "mistral.dkrz.de:/mnt/lustre01/work/bm0021/k204016/CORDEX/archive/"
sf.data_information = "..."  # ... any info on where the data can be accessed and transferred to the data center ...
"""
Explanation: Give the path where the data reside, for example:
blizzard.dkrz.de:/scratch/b/b364034/. If not applicable write N/A and give data access information in the data_information string
End of explanation
"""
sf.exclude_variables_list = "..." # example: sf.exclude_variables_list=["bnds", "vertices"]
"""
Explanation: Exclude variable list
In each CORDEX file there may be only one variable which shall be published and searchable at the ESGF portal (target variable). In order to facilitate publication, all non-target variables are included in a list used by the publisher to avoid publication. A list of known non-target variables is [time, time_bnds, lon, lat, rlon ,rlat ,x ,y ,z ,height, plev, Lambert_Conformal, rotated_pole]. Please enter other variables into the left field if applicable (e.g. grid description variables), otherwise write 'N/A'.
End of explanation
"""
sf.uniqueness_of_tracking_id = "..." # example: sf.uniqueness_of_tracking_id = "yes"
"""
Explanation: Uniqueness of tracking_id and creation_date
In case any of your files is replacing a file already published, it must not have the same tracking_id nor
the same creation_date as the file it replaces.
Did you make sure that this is not the case?
Reply 'yes'; otherwise adapt the new file versions.
End of explanation
"""
sf.variable_list_day = [
"clh","clivi","cll","clm","clt","clwvi",
"evspsbl","evspsblpot",
"hfls","hfss","hurs","huss","hus850",
"mrfso","mrro","mrros","mrso",
"pr","prc","prhmax","prsn","prw","ps","psl",
"rlds","rlus","rlut","rsds","rsdt","rsus","rsut",
"sfcWind","sfcWindmax","sic","snc","snd","snm","snw","sund",
"tas","tasmax","tasmin","tauu","tauv","ta200","ta500","ta850","ts",
"uas","ua200","ua500","ua850",
"vas","va200","va500","va850","wsgsmax",
"zg200","zg500","zmla"
]
sf.variable_list_mon = [
"clt",
"evspsbl",
"hfls","hfss","hurs","huss","hus850",
"mrfso","mrro","mrros","mrso",
"pr","psl",
"rlds","rlus","rlut","rsds","rsdt","rsus","rsut",
"sfcWind","sfcWindmax","sic","snc","snd","snm","snw","sund",
"tas","tasmax","tasmin","ta200",
"ta500","ta850",
"uas","ua200","ua500","ua850",
"vas","va200","va500","va850",
"zg200","zg500"
]
sf.variable_list_sem = [
"clt",
"evspsbl",
"hfls","hfss","hurs","huss","hus850",
"mrfso","mrro","mrros","mrso",
"pr","psl",
"rlds","rlus","rlut","rsds","rsdt","rsus","rsut",
"sfcWind","sfcWindmax","sic","snc","snd","snm","snw","sund",
"tas","tasmax","tasmin","ta200","ta500","ta850",
"uas","ua200","ua500","ua850",
"vas","va200","va500","va850",
"zg200","zg500"
]
sf.variable_list_fx = [
"areacella",
"mrsofc",
"orog",
"rootd",
"sftgif","sftlf"
]
"""
Explanation: Variable list
list of variables submitted -- please remove the ones you do not provide:
End of explanation
"""
# simple consistency check report for your submission form
res = form_handler.check_submission(sf)
sf.sub.valid_submission = res['valid_submission']
form_handler.DictTable(res)
"""
Explanation: Check your submission form
Please evaluate the following cell to check your submission form.
In case of errors, please go up to the corresponding information cells and update your information accordingly.
End of explanation
"""
form_handler.save_form(sf,"..my comment..") # edit my comment info
#evaluate this cell if you want a reference to the saved form emailed to you
# (only available if you access this form via the DKRZ form hosting service)
form_handler.email_form_info()
# evaluate this cell if you want a reference (provided by email)
# (only available if you access this form via the DKRZ hosting service)
form_handler.email_form_info(sf)
"""
Explanation: Save your form
your form will be stored (the form name consists of your last name plus your keyword)
End of explanation
"""
form_handler.email_form_info(sf)
form_handler.form_submission(sf)
"""
Explanation: officially submit your form
the form will be submitted to the DKRZ team for processing
you will also receive a confirmation email with a reference to your online form for future modifications
End of explanation
"""
|
darioizzo/optimal_landing | examples/2 - Training - indirect method.ipynb | lgpl-3.0 | import matplotlib.pyplot as plt
%matplotlib inline
import sys
sys.path.append('../')
import numpy as np
import deep_control as dc
import pandas
import seaborn as sns
"""
Explanation: Training deep neural networks
@cesans
End of explanation
"""
import glob
import pickle
from tqdm import tqdm
files = glob.glob('../data/simple/*pic')
total = 0
trajs = []
for f in tqdm(files, leave=True):
rw = pickle.load(open(f,'rb'))
for rwi in rw:
traj = np.hstack((rwi[0], rwi[1]))
df = pandas.DataFrame(data=traj)
col_names = ['t', 'x', 'y', 'z', 'vz', 'm', 'u1', 'u2']
df.columns = col_names
trajs.append(df)
ini_ps = np.vstack([t.values[0,:] for t in trajs])
for i in range(3):
for j in range(2):
if i!=0 or j!= 0:
plt.subplot(2,3,i*2+j)
plt.hist(ini_ps[:,i*2+j],59)
plt.locator_params(nbins=4)
plt.tight_layout()
"""
Explanation: Loading data
Previously generated trajectories can be loaded with dc.data.load_trajectories
End of explanation
"""
train_p = 0.9 # proportion of training data
x_train, y_train, x_test, y_test, idx_train = dc.data.create_training_data(trajs, train_p = train_p, n_outputs=2)
dc.nn.save_training_data([x_train, y_train, x_test, y_test, idx_train], "indirect_simple")
"""
Explanation: Training
From the trajectories we can generate the training sets:
End of explanation
"""
model_description = {"data": "indirect_simple",
"control": dc.nn.DTHETA,
"nlayers": 3,
"units": 32,
"output_mode": dc.nn.OUTPUT_LOG,
"dropout": False,
"batch_size": 8,
"epochs": 32,
"lr": 0.001,
"input_vars" : 5,
"hidden_nonlinearity": "ReLu"}
dc.nn.train(model_description)
model_description = {"data": "indirect_simple",
"control": dc.nn.THRUST,
"nlayers": 3,
"units": 32,
"output_mode": dc.nn.OUTPUT_LOG,
"dropout": False,
"batch_size": 8,
"epochs": 32,
"lr": 0.001,
"input_vars" : 5,
"hidden_nonlinearity": "ReLu"}
dc.nn.train(model_description)
model_th = dc.nn.load_model('nets/indirect_simple/0/ReLu_outputLog_3_32.model')
network_th = dc.nn.load_network(model_th, base_dir='')
model_dth = dc.nn.load_model('nets/indirect_simple/1/ReLu_outputLog_3_32.model')
network_dth = dc.nn.load_network(model_dth, base_dir='')
networks = [(model_th, network_th),
(model_dth, network_dth)]
data = dc.nn.load_training_data(model_dth, '')
plt.rcParams['figure.figsize'] = [20,5*len(networks)]
fig = plt.figure()
batch = 1000
b = 10
traj_length = 100
for s, dset in enumerate(['train', 'test']):
for i,(model,network) in enumerate(networks):
plt.subplot(len(networks), 2, i*2+s+1)
u = np.zeros((batch,len(networks)))
network_input = data['X_'+dset][b*batch:(b+1)*batch,:]
ui = network['pred'](network_input)
u[:,i] = ui[:,0]
u = dc.nn.postprocess(model, u)
u_gt = data['Y_'+dset][b*batch:(b+1)*batch,:].copy()
u_gt = dc.nn.postprocess(model, u_gt)
for j in range(5):
label_1, = plt.plot(np.arange(traj_length)+j*traj_length,u_gt[j*traj_length:(j+1)*traj_length,i], c=sns.color_palette()[0])
plt.gca().get_xaxis().set_ticks([])
plt.ylabel(r'$' + 'u_'+str(i+1)+'$')
ylims = plt.ylim()
plt.subplot(len(networks), 2, i*2+s+1)
for j in range(5):
label_2, = plt.plot(np.arange(traj_length)+j*traj_length, u[j*traj_length:(j+1)*traj_length,i], c=sns.color_palette()[1])
#plt.locator_params(axis='y', nbins=1)
plt.gca().get_xaxis().set_ticks([])
plt.ylim(ylims)
plt.xlabel(dset)
plt.figlegend([label_1,label_2], ['Optimal control', 'DNN predictions'], loc='upper center', ncol=2)
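# The next cells dump the trained theta-dot network to a plain-text file
# ('nn.params'): one header line with the layer count, then for each layer its
# weight matrix W and bias vector b together with their dimensions, presumably
# so that the controller can be loaded outside of Python/Lasagne.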
from lasagne import layers
params = layers.get_all_params(network_dth['layers'])
params = [(params[2*p].get_value(),
params[2*p+1].get_value()) for p in range(int(len(params)/2))]
f = open('nn.params', 'wt')
f.write('L{0}\n'.format(len(params)))
for i, layer in enumerate(params):
f.write('W{0},{1},{2}\n'.format(i,layer[0].shape[1],layer[0].shape[0]))
for j in range(layer[0].shape[1]):
for k in range(layer[0].shape[0]):
f.write('{0:.16}\t'.format(layer[0][k,j]))
f.write('\n')
f.write('b{0},{1},{2}\n'.format(i,1,layer[1].shape[0]))
for w in layer[1]:
f.write('{0:.16}\t\n'.format(w))
f.close()
x = np.atleast_2d([0,2,0,1,0])
network_dth['pred'](x)
"""
Explanation: We specify a model to train
End of explanation
"""
|
dchandan/rebound | ipython_examples/Testparticles.ipynb | gpl-3.0 | import rebound
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(m=1e-3, a=1, e=0.05)
sim.move_to_com()
sim.integrator = "whfast"
sim.dt = 0.05
sim.status()
"""
Explanation: Test particles
In this tutorial, we run a simulation with many test particles. Test particles have no mass and therefore do not perturb other particles. A simulation with test particles can be much faster, because it scales as $\mathcal{O}(N)$ compared to a simulation with massive particles, which scales as $\mathcal{O}(N^2)$.
But let's first set up two massive particles in REBOUND, move to the center of mass frame, and choose WHFast as the integrator.
End of explanation
"""
import numpy as np
N_testparticle = 1000
a_initial = np.linspace(1.1, 3, N_testparticle)
for a in a_initial:
sim.add(a=a,anom=np.random.rand()*2.*np.pi) # mass is set to 0 by default, random true anomaly
"""
Explanation: Next, we'll add the test particles. We just set the mass to zero. If you give the function rebound.add() no m=NUMBER argument, then it assumes the mass is zero. We randomize the true anomaly of the particles and place them outside the orbit of the massive planet.
Note that test-particles must be added after all massive planets have been added.
End of explanation
"""
sim.N_active = 2
"""
Explanation: Next, we set the N_active variable of REBOUND to the number of active particles in our simulation. Here, we have two active (massive) particles, the star and the planet.
End of explanation
"""
t_max = 200.*2.*np.pi
N_out = 10
xy = np.zeros((N_out, N_testparticle, 2))
times = np.linspace(0, t_max, N_out)
for i, time in enumerate(times):
sim.integrate(time)
for j, p in enumerate(sim.particles[2:]):
xy[i][j] = [p.x, p.y]
"""
Explanation: Next, let's do the simulation. We will run it for 200 orbits of the planet which, in our units of $G=1$, is $t_{\rm max} = 200\cdot2\pi$. While we run the simulation, we'll store the positions of all test particles 10 times during that interval.
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5,5))
ax = plt.subplot(111)
ax.set_xlim([-3,3])
ax.set_ylim([-3,3])
plt.scatter(xy[:,:,0],xy[:,:,1],marker=".");
"""
Explanation: We now plot the test particles' positions.
End of explanation
"""
orbits = sim.calculate_orbits()[1:]
a_final = [o.a for o in orbits]
fig = plt.figure(figsize=(15,5))
ax = plt.subplot(111)
ax.set_yscale('log')
ax.set_xlabel(r"period ratio $r$")
ax.set_ylabel("relative semi-major axis change")
plt.plot(np.power(a_initial,1.5),(np.fabs(a_final-a_initial)+1.0e-16)/a_initial,marker=".");
"""
Explanation: One can see that some particles changed their orbits quite significantly, while others seem to stay roughly on circular orbits. To investigate this a bit further, we now calculate and plot the relative change of the test particles' semi-major axis over the duration of the simulation. We'll plot it as a function of the initial period ratio $r=P_{\rm test particle}/P_{\rm planet}$ for which we make use of Kepler's law, $P = 2\pi\sqrt{a^3/GM}$.
End of explanation
"""
e_final = np.array([o.e for o in orbits])
fig = plt.figure(figsize=(15,5))
ax = plt.subplot(111)
#ax.set_ylim([0,1])
ax.set_yscale('log')
ax.set_xlabel(r"period ratio $r$")
ax.set_ylabel("final eccentricity")
plt.plot(np.power(a_initial,1.5),e_final+1.0e-16,marker=".");
"""
Explanation: Very close to the planet, test particles change their semi-major axes by order unity. These particles have a close encounter with the planet and get scattered.
We also see two peaks at $r=2$ and $r=3$. These correspond to mean motion resonances. We can also see the mean motion resonances by plotting the eccentricities of the particles.
End of explanation
"""
|
julienchastang/unidata-python-workshop | failing_notebooks/Siphon Radar Server.ipynb | mit | from siphon.catalog import TDSCatalog
cat = TDSCatalog('http://thredds.ucar.edu/thredds/radarServer/catalog.xml')
list(cat.catalog_refs)
"""
Explanation: <div style="width:1000 px">
<div style="float:right; width:98 px; height:98px;">
<img src="https://raw.githubusercontent.com/Unidata/MetPy/master/metpy/plots/_static/unidata_150x150.png" alt="Unidata Logo" style="height: 98px;">
</div>
<h1>Using Siphon to get NEXRAD Level 3 data from a TDS</h1>
<h3>Unidata Python Workshop</h3>
<div style="clear:both"></div>
</div>
<hr style="height:2px;">
<div style="float:right; width:250 px"><img src="https://upload.wikimedia.org/wikipedia/commons/4/4d/Siphoning.JPG" alt="Siphoning" style="height: 300px;"></div>
Objectives
Learn more about Siphon
Use the RadarServer class to retrieve radar data from a TDS
Plot this data using numpy arrays and matplotlib
In this example, we'll focus on interacting with the Radar Query Service to retrieve radar data.
But first!
Bookmark these resources for when you want to use Siphon later!
+ latest Siphon documentation
+ Siphon github repo
+ TDS documentation
Querying the server
First, we point at the top level of the Radar Query Service (the "Radar Server") to see what radar collections are available:
End of explanation
"""
from siphon.radarserver import RadarServer
rs = RadarServer(cat.catalog_refs['NEXRAD Level III Radar from IDD'].href)
"""
Explanation: Next we create an instance of the RadarServer object to point at one of these collections. This downloads some top level metadata and sets things up so we can easily query the server.
End of explanation
"""
print(sorted(rs.variables))
"""
Explanation: We can use rs.variables to see a list of radar products available to view from this access URL.
End of explanation
"""
sorted(rs.metadata['variables'])
"""
Explanation: If you're not a NEXRAD radar expert, there is more information available within the metadata downloaded from the server. (NOTE: Only the codes above are valid for queries.)
End of explanation
"""
print(sorted(rs.stations))
rs.stations['TLX']
"""
Explanation: We can also see a list of the stations. Each station has associated location information.
End of explanation
"""
from datetime import datetime
query = rs.query()
query.stations('TLX').time(datetime.utcnow()).variables('N0Q')
"""
Explanation: Next, we'll create a new query object to help request the data. Using the chaining methods, let's ask for reflectivity data at the lowest tilt (NOQ) from radar TLX (Oklahoma City) for the current time. We see that when the query is represented as a string, it shows the encoded URL.
End of explanation
"""
rs.validate_query(query)
"""
Explanation: The query also supports time range queries, queries for closest to a lon/lat point, or getting all radars within a lon/lat box.
We can use the RadarServer instance to check our query, to make sure we have required parameters and that we have chosen valid station(s) and variable(s).
End of explanation
"""
catalog = rs.get_catalog(query)
"""
Explanation: Make the request, which returns an instance of TDSCatalog. This handles parsing the catalog
End of explanation
"""
catalog.datasets
"""
Explanation: We can look at the datasets in the catalog to see what data the query found. We find one NIDS file in the returned catalog.
End of explanation
"""
ds = list(catalog.datasets.values())[0]
ds.access_urls
"""
Explanation: Exercise: Querying the radar server
We'll work through doing some more queries on the radar server. Some useful links:
- RadarQuery documentation
- Documentation on Python's datetime.timedelta
See if you can write Python code for the following queries:
Get ZDR (differential reflectivity) for 3 days ago from the radar nearest to Hays, KS (lon -99.324403, lat 38.874929). No map necessary!
Get base reflectivity for the last two hours from all of the radars in Wyoming (call it the bounding box with lower left corner 41.008717, -111.056360 and upper right corner 44.981008, -104.042719)
Pulling out the data
We can pull that dataset out of the dictionary and look at the available access URLs. We see URLs for OPeNDAP, CDMRemote, and HTTPServer (direct download).
End of explanation
"""
from siphon.cdmr import Dataset
data = Dataset(ds.access_urls['CdmRemote'])
"""
Explanation: We'll use the CDMRemote reader in Siphon and pass it the appropriate access URL. (This will all behave identically to using the 'OPENDAP' access, if we replace the Dataset from Siphon with that from netCDF4).
End of explanation
"""
list(data.variables)
"""
Explanation: The CDMRemote reader provides an interface that is almost identical to the usual python NetCDF interface.
End of explanation
"""
rng = data.variables['gate'][:]
az = data.variables['azimuth'][:]
ref = data.variables['BaseReflectivityDR'][:]
"""
Explanation: We pull out the variables we need for azimuth and range, as well as the data itself.
End of explanation
"""
import numpy as np
x = rng * np.sin(np.deg2rad(az))[:, None]
y = rng * np.cos(np.deg2rad(az))[:, None]
ref = np.ma.array(ref, mask=np.isnan(ref))
"""
Explanation: Then convert the polar coordinates to Cartesian using numpy
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
import cartopy
import cartopy.feature as cfeature
from metpy.plots import ctables # For NWS colortable
# Create projection centered on the radar. This allows us to use x
# and y relative to the radar.
proj = cartopy.crs.LambertConformal(central_longitude=data.RadarLongitude,
central_latitude=data.RadarLatitude)
# New figure with specified projection
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1, projection=proj)
ax.add_feature(cfeature.STATES.with_scale('50m'), linewidth=2)
# Set limits in lat/lon space
ax.set_extent([data.RadarLongitude - 2.5, data.RadarLongitude + 2.5,
data.RadarLatitude - 2.5, data.RadarLatitude + 2.5])
# Get the NWS typical reflectivity color table, along with an appropriate norm that
# starts at 5 dBz and has steps in 5 dBz increments
norm, cmap = ctables.registry.get_with_steps('NWSReflectivity', 5, 5)
mesh = ax.pcolormesh(x, y, ref, cmap=cmap, norm=norm, zorder=0)
"""
Explanation: Finally, we plot them up using matplotlib and cartopy.
End of explanation
"""
|
ianozsvald/example_conversion_of_excel_to_pandas | Load and Manipulate Sheet by Adding Logic.ipynb | mit | import pandas as pd
df = pd.read_excel("sheet_1_without_simple_logic.xls")
print(df)
# note the NaN (not-a-number) cells when we have no value
df.head(10) # this creates a Table view (non-interactive but prettier)
print("Column names:", df.columns)
print("Information about each row including data types:")
print("(note - type 'object' is catch-all that includes strings)")
df.info()
print("\nWe can extract a column of data as a Series object:")
print(df['Feature1'])
row = df.ix[0]
print("\nWe can extract a row as a Python dictionary:")
print(row)
print("\nRow items, e.g. Feature1={feature1}".format(feature1=row['Feature1']))
def decision_f1_f2(row):
feature_1 = row['Feature1']
feature_2 = row['Feature2']
if feature_1 > 0.5:
if feature_2 > 0.5:
return True
return False
# we'll use apply on the entire DataFrame, axis=1 means row-wise (not column-wise)
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.apply.html
df.apply(decision_f1_f2, axis=1)
# note this doesn't change the DataFrame, it generates a new separate Series
# and here we just print it and then discard it
df.head(10) # confirm we haven't modified the DataFrame
# we can assign the result back to the DataFrame as a new column
new_result = df.apply(decision_f1_f2, axis=1)
df['DecisionF1F2'] = new_result
df.head(10)
def decision_tvshow(cell):
cleaned_cell = cell
cleaned_cell = cleaned_cell.lower() # lowercase
is_hollyoaks = cleaned_cell == "hollyoaks"
return is_hollyoaks
# we can assign the result back to the DataFrame as a new column
df['Decision2'] = df['TVShow'].apply(decision_tvshow)
df.head(10)
"""
Explanation: Load a Sheet, assume there's no logic, we'll add the logic
You'll have run jupyter notebook at the command line or via the Windows Anaconda tool.
In this demo we add logic in place of Excel cell functions and export a finished sheet for comparison.
End of explanation
"""
def decision_tvshow_is_hollyoaks(cell):
cleaned_cell = cell
cleaned_cell = cleaned_cell.lower() # lowercase
cleaned_cell = cleaned_cell.strip() # remove superflous whitespace
is_hollyoaks = cleaned_cell == "hollyoaks"
return is_hollyoaks
# we can assign the result back to the DataFrame as a new column
df['Decision2'] = df['TVShow'].apply(decision_tvshow_is_hollyoaks)
df.head(10)
"""
Explanation: Add a strip to remove whitespace from the "Hollyoaks " example
End of explanation
"""
# use a different way to access the columns (using .colname - this only works for
# easy-to-read ASCII names, it won't work with funny characters)
# and use the logical and (&) to do pairwise logic, assigning the result to our new column
df['Decision3'] = df.DecisionF1F2 & df.Decision2
df.head(10)
"""
Explanation: Combine DecisionF1F2 and Decision2
End of explanation
"""
writer = pd.ExcelWriter('sheet_1_with_added_logic_generated_via_pandas.xlsx', engine='xlsxwriter')
df.to_excel(writer, index=False, sheet_name='Our New Sheet')
workbook = writer.book
worksheet = writer.sheets['Our New Sheet']
writer.save()
# note we could add lots of conditional formatting for Excel via:
# http://pbpython.com/improve-pandas-excel-output.html
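# A sketch of that idea using xlsxwriter's conditional formatting (written to a separate,
# illustrative file so it does not interfere with the workbook already saved above;
# formatting has to be applied before save()). The 0.5 threshold mirrors decision_f1_f2.
writer2 = pd.ExcelWriter('sheet_1_with_conditional_formatting.xlsx', engine='xlsxwriter')
df.to_excel(writer2, index=False, sheet_name='Our New Sheet')
workbook2 = writer2.book
worksheet2 = writer2.sheets['Our New Sheet']
highlight = workbook2.add_format({'bg_color': '#FFC7CE'})
col = df.columns.get_loc('Feature1')
# rows 1..len(df) hold the data (row 0 is the header)
worksheet2.conditional_format(1, col, len(df), col,
                              {'type': 'cell', 'criteria': '>', 'value': 0.5, 'format': highlight})
writer2.save()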
"""
Explanation: Output a new XLS
End of explanation
"""
from fuzzywuzzy import fuzz
# http://pbpython.com/excel-pandas-comp.html side reading on fuzzywuzzy
# http://chairnerd.seatgeek.com/fuzzywuzzy-fuzzy-string-matching-in-python/ other fuzzywuzzy metrics for
# other ways of measuring similarity
# quick demo:
item1 = "hollyoaks"
target = "hollyoaks"
print(target, fuzz.ratio(item1, target))
target = 'holly-oaks'
print(target, fuzz.ratio(item1, target))
target = 'holly oak'
print(target, fuzz.ratio(item1, target))
target = "tv's best moments"
print(target, fuzz.ratio(item1, target))
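# fuzzywuzzy also provides other scorers (e.g. partial_ratio and token_sort_ratio,
# mentioned in the SeatGeek post linked above); a quick illustrative comparison:
target = 'holly oaks tv show'
print(target, fuzz.partial_ratio(item1, target), fuzz.token_sort_ratio(item1, target))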
def decision_tvshow_is_hollyoaks_with_smarts(cell):
cleaned_cell = cell
cleaned_cell = cleaned_cell.lower() # lowercase
cleaned_cell = cleaned_cell.strip() # remove superflous whitespace
is_hollyoaks = fuzz.ratio("hollyoaks", cleaned_cell) > 85
return is_hollyoaks
# we can assign the result back to the DataFrame as a new column
df['TVShow'].apply(decision_tvshow_is_hollyoaks_with_smarts)
df.head(10) # show that we *haven't* yet manipulated the DataFrame
df['Decision2'] = df['TVShow'].apply(decision_tvshow_is_hollyoaks_with_smarts)
df.head(10)
# note that we *haven't re-run Decision3*!
df['Decision3'] = df.DecisionF1F2 & df.Decision2
df.head(10)
# We could copy the Excel code down here and write out a new spreadsheet to disk...
"""
Explanation: Let's be clever - we'll do some text processing
We'll try the external fuzzywuzzy library to do some very simple text processing.
If needed install using conda install -c wpb fuzzywuzzy.
End of explanation
"""
|
eco32i/biodata | sessions/examples/CE PCA.ipynb | mit | !head ../../data/CE_exp.umi.tab
!tail ../../data/CE_exp.umi.tab
"""
Explanation: Read in expression matrix
mRNA-Seq from 10 individual C.elegans worms. Processed with CEL-Seq-pipeline (https://github.com/eco32i/CEL-Seq-pipeline)
End of explanation
"""
ce = pd.read_csv('../../data/CE_exp.umi.tab', sep='\t', skipfooter=5, engine='python')
ce
"""
Explanation: The expression matrix contains read counts per gene. Columns are worms, rows are genes.
End of explanation
"""
#ce = ce.ix[ce.ix[:,1:].mean(axis=1)>500,:]
X_std = StandardScaler().fit_transform(ce.iloc[:,1:].values.T)
X_std
sklearn_pca = sklearnPCA(n_components=10)
Y_sklearn = sklearn_pca.fit_transform(X_std)
Y_sklearn
"""
Explanation: PCA is sensitive to variable scaling. Therefore, before performing the analysis we need to normalize the data. StandardScaler will transform every variable to unit scale (mean 0, variance 1). Note also that sklearn expects columns to be genes (features) and rows to be worms (samples, or observations). Therefore we transpose the matrix before doing anything.
End of explanation
"""
sklearn_pca.explained_variance_
sklearn_pca.explained_variance_ratio_
vdf = pd.DataFrame()
vdf['PC'] = [(i+1) for i,x in enumerate(sklearn_pca.explained_variance_ratio_)]
vdf['var'] = sklearn_pca.explained_variance_ratio_
(ggplot(vdf, aes(x='PC', y='var'))
+ geom_point(size=5, alpha=0.4)
+ ylab('Explained variance')
+ theme(figure_size=(12,10))
)
pca_df = pd.DataFrame()
pca_df['sample'] = ['CE_%i' % (x+1) for x in range(10)]
pca_df['PC1'] = Y_sklearn[:,0]
pca_df['PC2'] = Y_sklearn[:,1]
(ggplot(pca_df, aes(x='PC1', y='PC2', color='sample'))
+ geom_point(size=5, alpha=0.5)
+ theme(figure_size=(12,10))
)
pca_df = pd.DataFrame()
pca_df['sample'] = ['CE_%i' % (x+1) for x in range(10)]
pca_df['PC1'] = Y_sklearn[:,0]
pca_df['PC3'] = Y_sklearn[:,2]
(ggplot(pca_df, aes(x='PC1', y='PC3', color='sample'))
+ geom_point(size=5, alpha=0.5)
+ theme(figure_size=(12,10))
)
pca_df = pd.DataFrame()
pca_df['sample'] = ['CE_%i' % (x+1) for x in range(10)]
pca_df['PCA2'] = Y_sklearn[:,1]
pca_df['PCA4'] = Y_sklearn[:,3]
(ggplot(pca_df, aes(x='PCA2', y='PCA4', color='sample'))
+ geom_point(size=5, alpha=0.5)
+ theme(figure_size=(12,10))
)
"""
Explanation: Y_sklearn is a numpy array of the shape (num_samples, n_components) where original X data is projected onto the number of extracted principal components
Plot explained variance
End of explanation
"""
|
gwaygenomics/pancancer | scripts/ras_differential_expression.ipynb | bsd-3-clause | import os
import sys
import pandas as pd
import scipy
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import seaborn as sns
import plotnine as gg
sys.path.insert(0, os.path.join('..', 'scripts', 'util'))
from tcga_util import integrate_copy_number
%matplotlib inline
plt.style.use('seaborn-notebook')
"""
Explanation: Ras Differential Expression Analysis
Perform a t-test across all genes to determine which genes are differentially expressed between Ras wild-type and Ras mutant samples. Compare with what the model learns and output the data for visualization with R/ggplot2.
End of explanation
"""
# Load RNAseq matrix
expr_file = os.path.join('..', 'data', 'pancan_rnaseq_freeze.tsv')
rnaseq_full_df = pd.read_table(expr_file, index_col=0)
# Load Mutation matrix
mut_file = os.path.join('..', 'data', 'pancan_mutation_freeze.tsv')
mutation_df = pd.read_table(mut_file, index_col=0)
# Load sample freeze data and cancer genes
sample_freeze_file = os.path.join('..', 'data', 'sample_freeze.tsv')
sample_freeze = pd.read_table(sample_freeze_file, index_col=0)
cancer_gene_file = os.path.join('..', 'data', 'vogelstein_cancergenes.tsv')
cancer_genes = pd.read_table(cancer_gene_file)
# Load copy number data to determine final status matrix
copy_loss_file = os.path.join('..', 'data', 'copy_number_loss_status.tsv')
copy_gain_file = os.path.join('..', 'data', 'copy_number_gain_status.tsv')
copy_loss_df = pd.read_table(copy_loss_file, index_col=0)
copy_gain_df = pd.read_table(copy_gain_file, index_col=0)
# Load Coefficients File
coef_file = os.path.join('..', 'classifiers', 'RAS', 'classifier_coefficients.tsv')
coef_df = pd.read_table(coef_file, index_col=0)
"""
Explanation: Load Data
Loading RNAseq (X matrix), copy number and mutation (Y matrix), and coefficients data (W matrix from machine learning model)
End of explanation
"""
# Process y matrix
genes = ['KRAS', 'HRAS', 'NRAS']
y = mutation_df[genes]
y_df = integrate_copy_number(y=y, cancer_genes_df=cancer_genes,
genes=genes, loss_df=copy_loss_df,
gain_df=copy_gain_df)
y_df = y_df.assign(total_status=y.max(axis=1))
y_df = y_df.reset_index().merge(sample_freeze,
how='left').set_index('SAMPLE_BARCODE')
y_df['total_status'].value_counts()
# Write out files for easy use in R DEG analysis
ras_status_file = os.path.join('..', 'data', 'Ras_sample_status.tsv')
y_df.to_csv(ras_status_file, sep='\t')
ras_mad_genes_file = os.path.join('..', 'data', 'RNAseq_scaled_all_genes.tsv')
x_df = rnaseq_full_df.dropna(axis=1)
x_df_update = StandardScaler().fit_transform(x_df)
x_df_update = pd.DataFrame(x_df_update, columns=x_df.columns, index=x_df.index)
x_df = x_df_update
x_df.to_csv(ras_mad_genes_file, sep='\t')
x_df.shape
"""
Explanation: Process and Prep Data for Differential Expression Analysis
End of explanation
"""
# Get two arrays to compare
ras_wt_samples = y_df[y_df['total_status'] == 0].index
ras_mut_samples = y_df[y_df['total_status'] == 1].index
x_wt_df = x_df.loc[ras_wt_samples, :]
x_mut_df = x_df.loc[ras_mut_samples, :]
ttest_results = scipy.stats.ttest_ind(x_mut_df, x_wt_df)
t_stat = ttest_results.statistic
p_val = ttest_results.pvalue
ttest_df = pd.DataFrame(t_stat, columns=['stat'])
ttest_df = ttest_df.assign(pval = p_val)
ttest_df = ttest_df.assign(gene = x_wt_df.columns)
plot_df = pd.merge(ttest_df, coef_df, left_on='gene', right_on='feature')
plot_df.head(2)
p = (gg.ggplot(plot_df, gg.aes(x='weight', y='stat')) +
gg.geom_point(size=4, alpha=0.6) +
gg.theme_seaborn(style='whitegrid') +
gg.xlab('Ras Classifier Weight') +
gg.ylab('Differential Expression Score') +
gg.ggtitle('') +
gg.theme(
plot_title=gg.element_text(size=22),
axis_title_x=gg.element_text(size=16),
axis_title_y=gg.element_text(size=16),
axis_text_x=gg.element_text(size=14),
axis_text_y=gg.element_text(size=14),
axis_ticks_length=4,
legend_position=(1.0, 0.5),
legend_background=gg.element_blank(),
legend_key=gg.element_rect(fill='white'),
legend_text=gg.element_text(size=9),
legend_title=gg.element_text(size=12),
panel_border=gg.element_blank(),
panel_grid_major=gg.element_blank(),
panel_grid_minor=gg.element_blank()))
p
"""
Explanation: Example Naive Analysis using simple unadjusted t-tests
End of explanation
"""
|
daniel-koehn/Theory-of-seismic-waves-II | 04_FD_stability_dispersion/lecture_notebooks/1_fd_stability_dispersion.ipynb | gpl-3.0 | # Execute this cell to load the notebook's style sheet, then ignore it
from IPython.core.display import HTML
css_file = '../../style/custom.css'
HTML(open(css_file, "r").read())
"""
Explanation: Content under Creative Commons Attribution license CC-BY 4.0, code under BSD 3-Clause License © 2018 parts of this notebook are from this Jupyter notebook by Heiner Igel (@heinerigel), Lion Krischer (@krischer) and Taufiqurrahman (@git-taufiqurrahman) which is a supplemenatry material to the book Computational Seismology: A Practical Introduction, additional modifications by D. Koehn, notebook style sheet by L.A. Barba, N.C. Clementi
End of explanation
"""
# Import Libraries
# ----------------
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from pylab import rcParams
# Ignore Warning Messages
# -----------------------
import warnings
warnings.filterwarnings("ignore")
# Definition of modelling parameters
# ----------------------------------
xmax = 500 # maximum spatial extension of the 1D model (m)
dx = 0.5 # grid point distance in x-direction
tmax = 1.001 # maximum recording time of the seismogram (s)
dt = 0.0010 # time step
vp0 = 333. # P-wave speed in medium (m/s)
# acquisition geometry
xr = 365.0 # receiver position (m)
xsrc = 249.5 # source position (m)
f0 = 25. # dominant frequency of the source (Hz)
t0 = 4. / f0 # source time shift (s)
"""
Explanation: FD stability and dispersion
In the last lesson we developed a 1D acoustic FD modelling code. For the given modelling parameters, the code worked flawlessly and delivered modelled seismograms, which are in good agreement with the analytical solution. In this lesson we want to investigate how to choose optimum time steps dt and spatial grid point distances dx to get stable and accurate FD modelling results. We start by revisiting a simplified version of our 1D acoustic FD modelling code ...
End of explanation
"""
# 1D Wave Propagation (Finite Difference Solution)
# ------------------------------------------------
def FD_1D_acoustic(dt,dx):
nx = (int)(xmax/dx) # number of grid points in x-direction
print('nx = ',nx)
nt = (int)(tmax/dt) # maximum number of time steps
print('nt = ',nt)
ir = (int)(xr/dx) # receiver location in grid in x-direction
isrc = (int)(xsrc/dx) # source location in grid in x-direction
# Source time function (Gaussian)
# -------------------------------
src = np.zeros(nt + 1)
time = np.linspace(0 * dt, nt * dt, nt)
# 1st derivative of a Gaussian
src = -2. * (time - t0) * (f0 ** 2) * (np.exp(- (f0 ** 2) * (time - t0) ** 2))
# Analytical solution
# -------------------
G = time * 0.
# Initialize coordinates
# ----------------------
x = np.arange(nx)
x = x * dx # coordinate in x-direction
for it in range(nt): # Calculate Green's function (Heaviside function)
if (time[it] - np.abs(x[ir] - x[isrc]) / vp0) >= 0:
G[it] = 1. / (2 * vp0)
Gc = np.convolve(G, src * dt)
Gc = Gc[0:nt]
lim = Gc.max() # get limit value from the maximum amplitude
# Initialize empty pressure arrays
# --------------------------------
p = np.zeros(nx) # p at time n (now)
pold = np.zeros(nx) # p at time n-1 (past)
pnew = np.zeros(nx) # p at time n+1 (present)
d2px = np.zeros(nx) # 2nd space derivative of p
# Initialize model (assume homogeneous model)
# -------------------------------------------
vp = np.zeros(nx)
vp = vp + vp0 # initialize wave velocity in model
# Initialize empty seismogram
# ---------------------------
seis = np.zeros(nt)
# Calculate Partial Derivatives
# -----------------------------
for it in range(nt):
# FD approximation of spatial derivative by 3 point operator
for i in range(1, nx - 1):
d2px[i] = (p[i + 1] - 2 * p[i] + p[i - 1]) / dx ** 2
# Time Extrapolation
# ------------------
pnew = 2 * p - pold + vp ** 2 * dt ** 2 * d2px
# Add Source Term at isrc
# -----------------------
# Absolute pressure w.r.t analytical solution
pnew[isrc] = pnew[isrc] + src[it] / dx * dt ** 2
# Remap Time Levels
# -----------------
pold, p = p, pnew
# Output of Seismogram
# -----------------
seis[it] = p[ir]
# Compare FD Seismogram with analytical solution
# ----------------------------------------------
# Define figure size
rcParams['figure.figsize'] = 12, 5
plt.plot(time, seis, 'b-',lw=3,label="FD solution") # plot FD seismogram
Analy_seis = plt.plot(time,Gc,'r--',lw=3,label="Analytical solution") # plot analytical solution
plt.xlim(time[0], time[-1])
plt.ylim(-lim, lim)
plt.title('Seismogram')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.legend()
plt.grid()
plt.show()
dx = 0.5 # grid point distance in x-direction
dt = 0.0010 # time step
FD_1D_acoustic(dt,dx)
"""
Explanation: Comparison of numerical with analytical solution
In the function below we solve the homogeneous 1D acoustic wave equation by the 3-point spatial/temporal difference operator and compare the numerical results with the analytical solution. To play a little bit more with the modelling parameters, I restricted the input parameters to dt and dx. The number of spatial grid points and time steps, as well as the discrete source and receiver positions are estimated within this function.
End of explanation
"""
dx = 0.5 # grid point distance in x-direction
#dt = 0.0010 # old time step
dt = 0.0015023 # time step
FD_1D_acoustic(dt,dx)
"""
Explanation: This is the same result we achieved in the last lesson. Now, you might get the smart idea to save some computation time by increasing the time step dt. Let's try it ...
End of explanation
"""
# dx = 0.5 # old grid point distance in x-direction
dx = 7.0 # new grid point distance in x-direction
dt = 0.0010 # time step
FD_1D_acoustic(dt,dx)
"""
Explanation: Oops, maybe this idea was not so smart at all, because the modelling becomes unstable. Instead of increasing the time step dt, we could try to increase the spatial discretization dx to save computation time ...
End of explanation
"""
Nwave = np.arange(2,12,0.25) # numbers per wavelength
vp0 = 333.0 # P-wave velocity (m/s)
def dispersion_1D(eps):
vp_phase = (vp0*Nwave/(np.pi*eps)) * np.arcsin(eps*np.sin(np.pi/Nwave))
return vp_phase
vp_eps_1 = dispersion_1D(1.0)
vp_eps_2 = dispersion_1D(0.9)
vp_eps_3 = dispersion_1D(0.8)
vp_eps_4 = dispersion_1D(0.7)
plt.plot(Nwave, vp_eps_1, 'b-',lw=3,label=r"$\epsilon=1$")
plt.plot(Nwave, vp_eps_2, 'r-',lw=3,label=r"$\epsilon=0.9$")
plt.plot(Nwave, vp_eps_3, 'g-',lw=3,label=r"$\epsilon=0.8$")
plt.plot(Nwave, vp_eps_4, 'k-',lw=3,label=r"$\epsilon=0.7$")
plt.title('Grid dispersion')
plt.xlabel('Number of grid points per wavelength')
plt.ylabel('Phase velocity $v_{phase}$, m/s')
plt.legend()
plt.grid()
plt.show()
"""
Explanation: Hmm, the accuracy of the FD modelling result compared to the analytical solution clearly deteriorates when the spatial grid point distance $dx$ is increased. And why does the P body wave become dispersive? More generally, how do I choose $dx$ and $dt$ without using a trial-and-error approach, which requires a lot of computation time, especially when considering 3D modelling? To understand the underlying problems, we will investigate the stability and numerical dispersion of the FD method in the next two sections.
Stability of 1D acoustic wave equation finite difference approximation
To analyse the stability of the finite difference approximation of the 1D acoustic wave equation:
\begin{equation}
\frac{p_{j}^{n+1} - 2 p_{j}^n + p_{j}^{n-1}}{\mathrm{d}t^2} \ = \ vp_{j}^2 \frac{p_{j+1}^{n} - 2 p_{j}^n + p_{j-1}^{n}}{\mathrm{d}x^2},
\end{equation}
we use an approach introduced by the famous mathematician and pioneer of computational sciences John von Neumann. For the von Neumann Analysis, we assume harmonic plane wave solutions for the pressure wavefield like:
\begin{equation}
p = exp(i(kx-\omega t)),\nonumber
\end{equation}
with $i^2=-1$, the wavenumber $k$ and circular frequency $\omega$. Using the discrete
spatial coordinates:
$x_j = j dx,$
and times
$t_n = n dt.$
We can calculate discrete plane wave solutions at the discrete locations and times in eq. (1), for example at grid point j and time n:
\begin{equation}
p_j^n = exp(i(kjdx-\omega n dt)),\nonumber
\end{equation}
or at grid point j and time n+1:
\begin{align}
p_j^{n+1} &= exp(i(kjdx-\omega (n+1) dt))\nonumber\
&= exp(-i\omega dt)\; exp(i(kjdx-\omega n dt))\nonumber\
&= exp(-i\omega dt)\; p_j^n,\nonumber\
\end{align}
or at the grid point j and time n-1:
\begin{align}
p_j^{n-1} &= exp(i(kjdx-\omega (n-1) dt))\nonumber\
&= exp(i\omega dt)\; exp(i(kjdx-\omega n dt))\nonumber\
&= exp(i\omega dt)\; p_j^n.\nonumber\
\end{align}
Similar approximations can be estimated for time n at the spatial grid points j+1:
\begin{align}
p_{j+1}^{n} &= exp(i(k(j+1)dx-\omega n dt))\nonumber\
&= exp(ik dx)\; exp(i(kjdx-\omega n dt))\nonumber\
&= exp(ik dx)\; p_j^n,\nonumber\
\end{align}
and a grid point j-1:
\begin{align}
p_{j-1}^{n} &= exp(i(k(j-1)dx-\omega n dt))\nonumber\
&= exp(-ik dx)\; exp(i(kjdx-\omega n dt))\nonumber\
&= exp(-ik dx)\; p_j^n.\nonumber\
\end{align}
Inserting the discrete pressure wavefield solutions $p_j^{n+1}$, $p_j^{n-1}$, $p_{j+1}^{n}$ and $p_{j-1}^{n}$ in eq. (1), we get after some minor rearrangement:
\begin{equation}
exp(-i\omega dt)p_j^n - 2 p_j^n + exp(i\omega dt)p_j^n = vp_j^2 \frac{dt^2}{dx^2}\biggl(exp(-ik dx)p_j^n - 2 p_j^n + exp(ik dx)p_j^n\biggr).\nonumber
\end{equation}
Assuming that $p_j^n \ne 0$, we can divide the RHS and LHS by $p_j^n$
\begin{equation}
exp(-i\omega dt) - 2 + exp(i\omega dt) = vp_j^2 \frac{dt^2}{dx^2}\biggl(exp(-ik dx) - 2 + exp(ik dx)\biggr).\nonumber
\end{equation}
By further dividing RHS and LHS by 2, we get:
\begin{equation}
\frac{exp(i\omega dt) + exp(-i\omega dt)}{2} - 1 = vp_j^2 \frac{dt^2}{dx^2}\biggl(\frac{exp(ik dx) + exp(-ik dx)}{2} - 1\biggr).\nonumber
\end{equation}
Using the definition
\begin{equation}
\cos(x) = \frac{exp(ix) + exp(-ix)}{2},\nonumber
\end{equation}
we can simplify this expression to:
\begin{equation}
cos(\omega dt) - 1 = vp_j^2 \frac{dt^2}{dx^2}\biggl(cos(k dx) - 1\biggr).\nonumber
\end{equation}
After some further rearrangement and dividing both sides by 2, this leads to:
\begin{equation}
\frac{1 - cos(\omega dt)}{2} = vp_j^2 \frac{dt^2}{dx^2}\biggl(\frac{1 - cos(k dx)}{2}\biggr).\nonumber
\end{equation}
With the relation
\begin{equation}
sin^2\biggl(\frac{x}{2}\biggr) = \frac{1-cos(x)}{2}, \nonumber
\end{equation}
we get
\begin{equation}
sin^2\biggl(\frac{\omega dt}{2}\biggr) = vp_j^2 \frac{dt^2}{dx^2}\biggl(sin^2\biggl(\frac{k dx}{2}\biggr)\biggr).\nonumber
\end{equation}
Taking the square root of both sides finally leads to
\begin{equation}
sin\frac{\omega dt}{2} = vp_j \frac{dt}{dx}\biggl(sin\frac{k dx}{2}\biggr).
\end{equation}
This result is quite interesting. Notice that the sine functions $sin(x)$ on the LHS and RHS only take values between -1 and 1. However, if the factor on the RHS
\begin{equation}
\epsilon = vp_j \frac{dt}{dx} \nonumber
\end{equation}
is larger than 1 ($\epsilon>1$), the frequency $\omega$ becomes complex for some wavenumbers, the corresponding solutions grow exponentially and the numerical scheme becomes unstable. Therefore, the criterion
\begin{equation}
\epsilon = vp_j \frac{dt}{dx} \le 1 \nonumber
\end{equation}
has to be satisfied. This very important stability criterion was first described by the German-American mathematicians Richard Courant, Kurt Friedrichs and Hans Lewy in this paper from 1928. The Courant-Friedrichs-Lewy criterion, or CFL criterion for short, can also be rearranged to give the time step dt, assuming that we have defined a spatial grid point distance dx:
\begin{equation}
dt \le \frac{dx}{vp_j}. \nonumber
\end{equation}
This criterion is only correct for the FD solution of the 1D acoustic wave equation using the 3-point spatial/temporal FD operators and an explicit time-stepping scheme (eq.(1)).
More generally, we can write the Courant criterion as
\begin{equation}
dt \le \frac{dx}{\zeta vp_j}, \nonumber
\end{equation}
where the factor $\zeta$ depends on the used FD operator, dimension of the problem (1D, 2D, 3D) and the overall algorithm. Even though the CFL criterion strictly depends on the P-wave velocity at a specific grid point, in most cases the maximum velocity $v_{max}$ in the medium is used to estimate a constant time step $dt$ for the whole FD modelling run:
\begin{equation}
dt \le \frac{dx}{\zeta v_{max}}, \nonumber
\end{equation}
where $v_{max}$ is the maximum P-wave velocity in the acoustic case or the maximum S-wave velocity for the SH-problem. While the fulfillment of the CFL criterion leads to a stable simulation, it does not guarantee accurate modelling results.
The CFL criterion allows us to estimate an appropriate time step dt based on the maximum velocity in the model and the spatial grid point distance. But how do we choose the spatial gridpoint distance dx?
Numerical grid dispersion
In the modelling examples at the beginning of this Jupyter notebook, we have seen that the modelled wavefield can become subject to dispersion when the spatial grid point distance is chosen too large. The result of the von Neumann analysis can also explain this behaviour. Starting from eq. (2)
\begin{equation}
sin\frac{\omega dt}{2} = \epsilon\; sin\frac{k dx}{2}, \nonumber
\end{equation}
we apply the $arcsin$ to both sides
\begin{equation}
\frac{\omega dt}{2} = arcsin\biggl(\epsilon\; sin\frac{k dx}{2} \biggr)\nonumber
\end{equation}
and multiply the result by $\frac{2}{dt}$, we get
\begin{equation}
\omega = \frac{2}{dt}arcsin\biggl(\epsilon\; sin\frac{k dx}{2}\biggr)\nonumber
\end{equation}
Inserting this $\omega-k$ dependence into the definition of the phase velocity
\begin{equation}
v_{phase} = \frac{\omega}{k},\nonumber
\end{equation}
leads to
\begin{equation}
v_{phase} = \frac{2}{k dt}arcsin\biggl(\epsilon\; sin\frac{k dx}{2}\biggr).\nonumber
\end{equation}
As you can see, the phase velocity of the numerical FD solution is a function of the wavenumber k. Therefore, it can be subject to dispersion. To investigate this problem in more detail, we rewrite the phase velocity.
With the wavenumber $k=\frac{2 \pi}{\lambda}$, where $\lambda$ denotes the wavelength, we get:
\begin{equation}
v_{phase} = \frac{\lambda}{\pi dt}arcsin\biggl(\epsilon\; sin\frac{\pi dx}{\lambda}\biggr).\nonumber
\end{equation}
From the definition of $\epsilon = vp_0 \frac{dt}{dx}$, we can replace $dt$ by $dt = \frac{\epsilon dx}{vp_0}$ in the phase velocity:
\begin{equation}
v_{phase} = \frac{\lambda vp_0}{\pi \epsilon dx}arcsin\biggl(\epsilon\; sin\frac{\pi dx}{\lambda}\biggr).\nonumber
\end{equation}
Introducing the number of grid points per wavelength $N_\lambda = \frac{\lambda}{dx}$, we finally get:
\begin{equation}
v_{phase} = \frac{N_\lambda vp_0}{\pi \epsilon}arcsin\biggl(\epsilon\; sin\frac{\pi }{N_\lambda}\biggr).\nonumber
\end{equation}
Let's plot this result for $N_\lambda$ between 2 and 12, the homogeneous P-wave velocity $vp0\;=\;333\;m/s$, and $\epsilon$ values from 0.7 to 1.0 ...
End of explanation
"""
# calculate dx according to the grid dispersion criterion
Nlam = 12 # number of grid points per wavelength
fmax = 50.0 # fmax = 2 * f0 (Hz)
dx = vp0 / (Nlam*fmax) # spatial gridpoint distance (m)
print('dx = ', dx)
# calculate dt according to the CFL criterion
dt = dx / vp0 # time step (s)
# check CFL criterion
epsilon = vp0 * dt / dx
print('epsilon = ', epsilon)
if(epsilon>1.0):
print('Warning: CFL condition is violated!')
print('dt = ', dt)
FD_1D_acoustic(dt,dx)
"""
Explanation: Notice that no grid dispersion occurs in the case of $\epsilon=1$. Keep in mind though that this is only true for the homogeneous medium. Realistic modelling problems have a variable P-wave velocity, so $\epsilon$ is not constant within the model.
For all values $\epsilon<1$, numerical dispersion can occur if the spatial sampling is too coarse, especially when using only the Nyquist criterion $N_\lambda = 2$. For the 1D acoustic wave equation, the dispersion is minimized for $N_\lambda$ values between 8 and 12.
More generally, we can define the grid dispersion criterion for the spatial gridpoint distance
\begin{equation}
dx \le \frac{\lambda_{min}}{N_\lambda} = \frac{v_{min}}{N_\lambda f_{max}},\nonumber
\end{equation}
where $N_\lambda$ depends on the used FD operator, numerical scheme and also wave type, $v_{min}$ is the minimum P- or S-wave velocity in the model and $f_{max}$ the maximum frequency of the source wavelet.
Finally, let's apply the dispersion and stability criteria to our test problem in order to find optimum dt and dx values ...
End of explanation
"""
|
billzhao1990/CS231n-Spring-2017 | assignment2/ConvolutionalNetworks.ipynb | mit | # As usual, a bit of setup
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.cnn import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient_array, eval_numerical_gradient
from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.items():
print('%s: ' % k, v.shape)
"""
Explanation: Convolutional Networks
So far we have worked with deep fully-connected networks, using them to explore different optimization strategies and network architectures. Fully-connected networks are a good testbed for experimentation because they are very computationally efficient, but in practice all state-of-the-art results use convolutional networks instead.
First you will implement several layer types that are used in convolutional networks. You will then use these layers to train a convolutional network on the CIFAR-10 dataset.
End of explanation
"""
x_shape = (2, 3, 4, 4)
w_shape = (3, 3, 4, 4)
x = np.linspace(-0.1, 0.5, num=np.prod(x_shape)).reshape(x_shape)
w = np.linspace(-0.2, 0.3, num=np.prod(w_shape)).reshape(w_shape)
b = np.linspace(-0.1, 0.2, num=3)
conv_param = {'stride': 2, 'pad': 1}
out, _ = conv_forward_naive(x, w, b, conv_param)
correct_out = np.array([[[[-0.08759809, -0.10987781],
[-0.18387192, -0.2109216 ]],
[[ 0.21027089, 0.21661097],
[ 0.22847626, 0.23004637]],
[[ 0.50813986, 0.54309974],
[ 0.64082444, 0.67101435]]],
[[[-0.98053589, -1.03143541],
[-1.19128892, -1.24695841]],
[[ 0.69108355, 0.66880383],
[ 0.59480972, 0.56776003]],
[[ 2.36270298, 2.36904306],
[ 2.38090835, 2.38247847]]]])
# Compare your output to ours; difference should be around 2e-8
print('Testing conv_forward_naive')
print('difference: ', rel_error(out, correct_out))
"""
Explanation: Convolution: Naive forward pass
The core of a convolutional network is the convolution operation. In the file cs231n/layers.py, implement the forward pass for the convolution layer in the function conv_forward_naive.
You don't have to worry too much about efficiency at this point; just write the code in whatever way you find most clear.
You can test your implementation by running the following:
End of explanation
"""
from scipy.misc import imread, imresize
kitten, puppy = imread('kitten.jpg'), imread('puppy.jpg')
# kitten is wide, and puppy is already square
d = kitten.shape[1] - kitten.shape[0]
kitten_cropped = kitten[:, d//2:-d//2, :]
img_size = 200 # Make this smaller if it runs too slow
x = np.zeros((2, 3, img_size, img_size))
x[0, :, :, :] = imresize(puppy, (img_size, img_size)).transpose((2, 0, 1))
x[1, :, :, :] = imresize(kitten_cropped, (img_size, img_size)).transpose((2, 0, 1))
# Set up a convolutional weights holding 2 filters, each 3x3
w = np.zeros((2, 3, 3, 3))
# The first filter converts the image to grayscale.
# Set up the red, green, and blue channels of the filter.
w[0, 0, :, :] = [[0, 0, 0], [0, 0.3, 0], [0, 0, 0]]
w[0, 1, :, :] = [[0, 0, 0], [0, 0.6, 0], [0, 0, 0]]
w[0, 2, :, :] = [[0, 0, 0], [0, 0.1, 0], [0, 0, 0]]
# Second filter detects horizontal edges in the blue channel.
w[1, 2, :, :] = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]
# Vector of biases. We don't need any bias for the grayscale
# filter, but for the edge detection filter we want to add 128
# to each output so that nothing is negative.
b = np.array([0, 128])
# Compute the result of convolving each input in x with each filter in w,
# offsetting by b, and storing the results in out.
out, _ = conv_forward_naive(x, w, b, {'stride': 1, 'pad': 1})
def imshow_noax(img, normalize=True):
""" Tiny helper to show images as uint8 and remove axis labels """
if normalize:
img_max, img_min = np.max(img), np.min(img)
img = 255.0 * (img - img_min) / (img_max - img_min)
plt.imshow(img.astype('uint8'))
plt.gca().axis('off')
# Show the original images and the results of the conv operation
plt.subplot(2, 3, 1)
imshow_noax(puppy, normalize=False)
plt.title('Original image')
plt.subplot(2, 3, 2)
imshow_noax(out[0, 0])
plt.title('Grayscale')
plt.subplot(2, 3, 3)
imshow_noax(out[0, 1])
plt.title('Edges')
plt.subplot(2, 3, 4)
imshow_noax(kitten_cropped, normalize=False)
plt.subplot(2, 3, 5)
imshow_noax(out[1, 0])
plt.subplot(2, 3, 6)
imshow_noax(out[1, 1])
plt.show()
"""
Explanation: Aside: Image processing via convolutions
As a fun way to both check your implementation and gain a better understanding of the type of operation that convolutional layers can perform, we will set up an input containing two images and manually set up filters that perform common image processing operations (grayscale conversion and edge detection). The convolution forward pass will apply these operations to each of the input images. We can then visualize the results as a sanity check.
End of explanation
"""
np.random.seed(231)
x = np.random.randn(4, 3, 5, 5)
w = np.random.randn(2, 3, 3, 3)
b = np.random.randn(2,)
dout = np.random.randn(4, 2, 5, 5)
conv_param = {'stride': 1, 'pad': 1}
dx_num = eval_numerical_gradient_array(lambda x: conv_forward_naive(x, w, b, conv_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_forward_naive(x, w, b, conv_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_forward_naive(x, w, b, conv_param)[0], b, dout)
out, cache = conv_forward_naive(x, w, b, conv_param)
dx, dw, db = conv_backward_naive(dout, cache)
# Your errors should be around 1e-8'
print('Testing conv_backward_naive function')
print('dx error: ', rel_error(dx, dx_num))
print('dw error: ', rel_error(dw, dw_num))
print('db error: ', rel_error(db, db_num))
"""
Explanation: Convolution: Naive backward pass
Implement the backward pass for the convolution operation in the function conv_backward_naive in the file cs231n/layers.py. Again, you don't need to worry too much about computational efficiency.
When you are done, run the following to check your backward pass with a numeric gradient check.
End of explanation
"""
x_shape = (2, 3, 4, 4)
x = np.linspace(-0.3, 0.4, num=np.prod(x_shape)).reshape(x_shape)
pool_param = {'pool_width': 2, 'pool_height': 2, 'stride': 2}
out, _ = max_pool_forward_naive(x, pool_param)
correct_out = np.array([[[[-0.26315789, -0.24842105],
[-0.20421053, -0.18947368]],
[[-0.14526316, -0.13052632],
[-0.08631579, -0.07157895]],
[[-0.02736842, -0.01263158],
[ 0.03157895, 0.04631579]]],
[[[ 0.09052632, 0.10526316],
[ 0.14947368, 0.16421053]],
[[ 0.20842105, 0.22315789],
[ 0.26736842, 0.28210526]],
[[ 0.32631579, 0.34105263],
[ 0.38526316, 0.4 ]]]])
# Compare your output with ours. Difference should be around 1e-8.
print('Testing max_pool_forward_naive function:')
print('difference: ', rel_error(out, correct_out))
"""
Explanation: Max pooling: Naive forward
Implement the forward pass for the max-pooling operation in the function max_pool_forward_naive in the file cs231n/layers.py. Again, don't worry too much about computational efficiency.
Check your implementation by running the following:
End of explanation
"""
np.random.seed(231)
x = np.random.randn(3, 2, 8, 8)
dout = np.random.randn(3, 2, 4, 4)
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
dx_num = eval_numerical_gradient_array(lambda x: max_pool_forward_naive(x, pool_param)[0], x, dout)
out, cache = max_pool_forward_naive(x, pool_param)
dx = max_pool_backward_naive(dout, cache)
# Your error should be around 1e-12
print('Testing max_pool_backward_naive function:')
print('dx error: ', rel_error(dx, dx_num))
"""
Explanation: Max pooling: Naive backward
Implement the backward pass for the max-pooling operation in the function max_pool_backward_naive in the file cs231n/layers.py. You don't need to worry about computational efficiency.
Check your implementation with numeric gradient checking by running the following:
End of explanation
"""
from cs231n.fast_layers import conv_forward_fast, conv_backward_fast
from time import time
np.random.seed(231)
x = np.random.randn(100, 3, 31, 31)
w = np.random.randn(25, 3, 3, 3)
b = np.random.randn(25,)
dout = np.random.randn(100, 25, 16, 16)
conv_param = {'stride': 2, 'pad': 1}
t0 = time()
out_naive, cache_naive = conv_forward_naive(x, w, b, conv_param)
t1 = time()
out_fast, cache_fast = conv_forward_fast(x, w, b, conv_param)
t2 = time()
print('Testing conv_forward_fast:')
print('Naive: %fs' % (t1 - t0))
print('Fast: %fs' % (t2 - t1))
print('Speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('Difference: ', rel_error(out_naive, out_fast))
t0 = time()
dx_naive, dw_naive, db_naive = conv_backward_naive(dout, cache_naive)
t1 = time()
dx_fast, dw_fast, db_fast = conv_backward_fast(dout, cache_fast)
t2 = time()
print('\nTesting conv_backward_fast:')
print('Naive: %fs' % (t1 - t0))
print('Fast: %fs' % (t2 - t1))
print('Speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('dx difference: ', rel_error(dx_naive, dx_fast))
print('dw difference: ', rel_error(dw_naive, dw_fast))
print('db difference: ', rel_error(db_naive, db_fast))
from cs231n.fast_layers import max_pool_forward_fast, max_pool_backward_fast
np.random.seed(231)
x = np.random.randn(100, 3, 32, 32)
dout = np.random.randn(100, 3, 16, 16)
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
t0 = time()
out_naive, cache_naive = max_pool_forward_naive(x, pool_param)
t1 = time()
out_fast, cache_fast = max_pool_forward_fast(x, pool_param)
t2 = time()
print('Testing pool_forward_fast:')
print('Naive: %fs' % (t1 - t0))
print('fast: %fs' % (t2 - t1))
print('speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('difference: ', rel_error(out_naive, out_fast))
t0 = time()
dx_naive = max_pool_backward_naive(dout, cache_naive)
t1 = time()
dx_fast = max_pool_backward_fast(dout, cache_fast)
t2 = time()
print('\nTesting pool_backward_fast:')
print('Naive: %fs' % (t1 - t0))
print('speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('dx difference: ', rel_error(dx_naive, dx_fast))
"""
Explanation: Fast layers
Making convolution and pooling layers fast can be challenging. To spare you the pain, we've provided fast implementations of the forward and backward passes for convolution and pooling layers in the file cs231n/fast_layers.py.
The fast convolution implementation depends on a Cython extension; to compile it you need to run the following from the cs231n directory:
bash
python setup.py build_ext --inplace
The API for the fast versions of the convolution and pooling layers is exactly the same as the naive versions that you implemented above: the forward pass receives data, weights, and parameters and produces outputs and a cache object; the backward pass receives upstream derivatives and the cache object and produces gradients with respect to the data and weights.
NOTE: The fast implementation for pooling will only perform optimally if the pooling regions are non-overlapping and tile the input. If these conditions are not met then the fast pooling implementation will not be much faster than the naive implementation.
You can compare the performance of the naive and fast versions of these layers by running the following:
End of explanation
"""
from cs231n.layer_utils import conv_relu_pool_forward, conv_relu_pool_backward
np.random.seed(231)
x = np.random.randn(2, 3, 16, 16)
w = np.random.randn(3, 3, 3, 3)
b = np.random.randn(3,)
dout = np.random.randn(2, 3, 8, 8)
conv_param = {'stride': 1, 'pad': 1}
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
out, cache = conv_relu_pool_forward(x, w, b, conv_param, pool_param)
dx, dw, db = conv_relu_pool_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], b, dout)
print('Testing conv_relu_pool')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
from cs231n.layer_utils import conv_relu_forward, conv_relu_backward
np.random.seed(231)
x = np.random.randn(2, 3, 8, 8)
w = np.random.randn(3, 3, 3, 3)
b = np.random.randn(3,)
dout = np.random.randn(2, 3, 8, 8)
conv_param = {'stride': 1, 'pad': 1}
out, cache = conv_relu_forward(x, w, b, conv_param)
dx, dw, db = conv_relu_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: conv_relu_forward(x, w, b, conv_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_relu_forward(x, w, b, conv_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_relu_forward(x, w, b, conv_param)[0], b, dout)
print('Testing conv_relu:')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
"""
Explanation: Convolutional "sandwich" layers
Previously we introduced the concept of "sandwich" layers that combine multiple operations into commonly used patterns. In the file cs231n/layer_utils.py you will find sandwich layers that implement a few commonly used patterns for convolutional networks.
End of explanation
"""
model = ThreeLayerConvNet()
N = 50
X = np.random.randn(N, 3, 32, 32)
y = np.random.randint(10, size=N)
loss, grads = model.loss(X, y)
print('Initial loss (no regularization): ', loss)
model.reg = 0.5
loss, grads = model.loss(X, y)
print('Initial loss (with regularization): ', loss)
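# Quick numeric reference for the sanity check: with C = 10 classes and random weights,
# the expected softmax loss (without regularization) is about log(10).
print('Expected loss for random weights (log(10)): ', np.log(10))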
"""
Explanation: Three-layer ConvNet
Now that you have implemented all the necessary layers, we can put them together into a simple convolutional network.
Open the file cs231n/classifiers/cnn.py and complete the implementation of the ThreeLayerConvNet class. Run the following cells to help you debug:
Sanity check loss
After you build a new network, one of the first things you should do is sanity check the loss. When we use the softmax loss, we expect the loss for random weights (and no regularization) to be about log(C) for C classes. When we add regularization this should go up.
End of explanation
"""
num_inputs = 2
input_dim = (3, 16, 16)
reg = 0.0
num_classes = 10
np.random.seed(231)
X = np.random.randn(num_inputs, *input_dim)
y = np.random.randint(num_classes, size=num_inputs)
model = ThreeLayerConvNet(num_filters=3, filter_size=3,
input_dim=input_dim, hidden_dim=7,
dtype=np.float64)
loss, grads = model.loss(X, y)
for param_name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
param_grad_num = eval_numerical_gradient(f, model.params[param_name], verbose=False, h=1e-6)
e = rel_error(param_grad_num, grads[param_name])
print('%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name])))
"""
Explanation: Gradient check
After the loss looks reasonable, use numeric gradient checking to make sure that your backward pass is correct. When you use numeric gradient checking you should use a small amount of artifical data and a small number of neurons at each layer. Note: correct implementations may still have relative errors up to 1e-2.
End of explanation
"""
np.random.seed(231)
num_train = 100
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
model = ThreeLayerConvNet(weight_scale=1e-2)
solver = Solver(model, small_data,
num_epochs=15, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 5e-4,
},
verbose=True, print_every=1)
solver.train()
"""
Explanation: Overfit small data
A nice trick is to train your model with just a few training samples. You should be able to overfit small datasets, which will result in very high training accuracy and comparatively low validation accuracy.
End of explanation
"""
plt.subplot(2, 1, 1)
plt.plot(solver.loss_history, 'o')
plt.xlabel('iteration')
plt.ylabel('loss')
plt.subplot(2, 1, 2)
plt.plot(solver.train_acc_history, '-o')
plt.plot(solver.val_acc_history, '-o')
plt.legend(['train', 'val'], loc='upper left')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()
"""
Explanation: Plotting the loss, training accuracy, and validation accuracy should show clear overfitting:
End of explanation
"""
model = ThreeLayerConvNet(weight_scale=0.001, hidden_dim=500, reg=0.001)
solver = Solver(model, data,
num_epochs=1, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=20)
solver.train()
"""
Explanation: Train the net
By training the three-layer convolutional network for one epoch, you should achieve greater than 40% accuracy on the training set:
End of explanation
"""
from cs231n.vis_utils import visualize_grid
grid = visualize_grid(model.params['W1'].transpose(0, 2, 3, 1))
plt.imshow(grid.astype('uint8'))
plt.axis('off')
plt.gcf().set_size_inches(5, 5)
plt.show()
"""
Explanation: Visualize Filters
You can visualize the first-layer convolutional filters from the trained network by running the following:
End of explanation
"""
np.random.seed(231)
# Check the training-time forward pass by checking means and variances
# of features both before and after spatial batch normalization
N, C, H, W = 2, 3, 4, 5
x = 4 * np.random.randn(N, C, H, W) + 10
print('Before spatial batch normalization:')
print(' Shape: ', x.shape)
print(' Means: ', x.mean(axis=(0, 2, 3)))
print(' Stds: ', x.std(axis=(0, 2, 3)))
# Means should be close to zero and stds close to one
gamma, beta = np.ones(C), np.zeros(C)
bn_param = {'mode': 'train'}
out, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param)
print('After spatial batch normalization:')
print(' Shape: ', out.shape)
print(' Means: ', out.mean(axis=(0, 2, 3)))
print(' Stds: ', out.std(axis=(0, 2, 3)))
# Means should be close to beta and stds close to gamma
gamma, beta = np.asarray([3, 4, 5]), np.asarray([6, 7, 8])
out, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param)
print('After spatial batch normalization (nontrivial gamma, beta):')
print(' Shape: ', out.shape)
print(' Means: ', out.mean(axis=(0, 2, 3)))
print(' Stds: ', out.std(axis=(0, 2, 3)))
np.random.seed(231)
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
N, C, H, W = 10, 4, 11, 12
bn_param = {'mode': 'train'}
gamma = np.ones(C)
beta = np.zeros(C)
for t in range(50):
x = 2.3 * np.random.randn(N, C, H, W) + 13
spatial_batchnorm_forward(x, gamma, beta, bn_param)
bn_param['mode'] = 'test'
x = 2.3 * np.random.randn(N, C, H, W) + 13
a_norm, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param)
# Means should be close to zero and stds close to one, but will be
# noisier than training-time forward passes.
print('After spatial batch normalization (test-time):')
print(' means: ', a_norm.mean(axis=(0, 2, 3)))
print(' stds: ', a_norm.std(axis=(0, 2, 3)))
"""
Explanation: Spatial Batch Normalization
We already saw that batch normalization is a very useful technique for training deep fully-connected networks. Batch normalization can also be used for convolutional networks, but we need to tweak it a bit; the modification will be called "spatial batch normalization."
Normally batch-normalization accepts inputs of shape (N, D) and produces outputs of shape (N, D), where we normalize across the minibatch dimension N. For data coming from convolutional layers, batch normalization needs to accept inputs of shape (N, C, H, W) and produce outputs of shape (N, C, H, W) where the N dimension gives the minibatch size and the (H, W) dimensions give the spatial size of the feature map.
If the feature map was produced using convolutions, then we expect the statistics of each feature channel to be relatively consistent both between different images and different locations within the same image. Therefore spatial batch normalization computes a mean and variance for each of the C feature channels by computing statistics over both the minibatch dimension N and the spatial dimensions H and W.
Spatial batch normalization: forward
In the file cs231n/layers.py, implement the forward pass for spatial batch normalization in the function spatial_batchnorm_forward. Check your implementation by running the following:
End of explanation
"""
np.random.seed(231)
N, C, H, W = 2, 3, 4, 5
x = 5 * np.random.randn(N, C, H, W) + 12
gamma = np.random.randn(C)
beta = np.random.randn(C)
dout = np.random.randn(N, C, H, W)
bn_param = {'mode': 'train'}
fx = lambda x: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0]
fg = lambda a: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0]
fb = lambda b: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma, dout)
db_num = eval_numerical_gradient_array(fb, beta, dout)
_, cache = spatial_batchnorm_forward(x, gamma, beta, bn_param)
dx, dgamma, dbeta = spatial_batchnorm_backward(dout, cache)
print('dx error: ', rel_error(dx_num, dx))
print('dgamma error: ', rel_error(da_num, dgamma))
print('dbeta error: ', rel_error(db_num, dbeta))
"""
Explanation: Spatial batch normalization: backward
In the file cs231n/layers.py, implement the backward pass for spatial batch normalization in the function spatial_batchnorm_backward. Run the following to check your implementation using a numeric gradient check:
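Again only as a sketch (assuming the vanilla batchnorm_backward from earlier), the backward pass can reuse the same reshaping trick:

def spatial_batchnorm_backward(dout, cache):
    # Flatten the upstream gradient the same way as in the forward pass,
    # reuse the fully-connected backward pass, then reshape back.
    N, C, H, W = dout.shape
    dout_flat = dout.transpose(0, 2, 3, 1).reshape(-1, C)
    dx_flat, dgamma, dbeta = batchnorm_backward(dout_flat, cache)
    dx = dx_flat.reshape(N, H, W, C).transpose(0, 3, 1, 2)
    return dx, dgamma, dbeta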
End of explanation
"""
|
jmhsi/justin_tinker | data_science/courses/deeplearning2/kmeans_test.ipynb | apache-2.0 | import kmeans; reload(kmeans)
from kmeans import Kmeans
"""
Explanation: Clustering
Clustering techniques are unsupervised learning algorithms that try to group unlabelled data into "clusters", using the (typically spatial) structure of the data itself.
End of explanation
"""
n_clusters=6
n_samples =250
"""
Explanation: The easiest way to demonstrate how clustering works is to simply generate some data and show them in action.
Create data
End of explanation
"""
centroids = np.random.uniform(-35, 35, (n_clusters, 2))
slices = [np.random.multivariate_normal(centroids[i], np.diag([5., 5.]), n_samples)
for i in range(n_clusters)]
data = np.concatenate(slices).astype(np.float32)
"""
Explanation: To generate our data, we're going to pick 6 random points, which we'll call centroids, and for each point we're going to generate 250 random points about it.
In statistical parlance, we're going to simulate 1500 realizations from 6 different bivariate normal distributions (250 each) with random centroids over the range -35, 35.
End of explanation
"""
kmeans.plot_data(centroids, data, n_samples)
"""
Explanation: Below we can see each centroid marked w/ X, and the coloring associated to each respective cluster.
End of explanation
"""
k = Kmeans(data, n_clusters)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
initial_centroids = k.find_initial_centroids(n_clusters).eval()
"""
Explanation: K means
The goal of clustering is to identify these clusters, defined by their centroids, given the raw data with no labels. Once these centroids have been identified, each point is labelled as belonging to the centroid it is closest to.
K means clustering is a simple and popular approach to finding appropriate centroids. It does this by taking random centroids and iteratively moving them to make the clusters as compact as possible.
The algorithm is very simple:
- Select the number of clusters N you want to find
- Guess N random centroids (more on this below)
- While centroids change:
- Create clusters by assigning each point to the nearest centroid
- For each cluster, define the new centroid as the centroid of all points assigned to it
Typically the algorithm is terminated once the change in centroids is negligible or after a certain number of iterations.
While guessing random centroids is fine in theory, most implementations use the data itself to identify initial cluster points. This speeds up convergence by initializing the centroids in the appropriate regime. Typically, the initial points are selected from the data itself and chosen to be as far apart as possible.
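As a point of reference, here is a rough plain-numpy sketch of those steps (it is not the TensorFlow-based Kmeans class used below, and it does not handle empty clusters):

def kmeans_numpy(data, k, n_iters=10):
    # Guess k initial centroids by sampling points from the data
    centroids = data[np.random.choice(len(data), k, replace=False)]
    for _ in range(n_iters):
        # Assign each point to its nearest centroid
        dists = ((data[:, None, :] - centroids[None, :, :])**2).sum(-1)
        labels = dists.argmin(1)
        # Move each centroid to the mean of the points assigned to it
        centroids = np.array([data[labels == j].mean(0) for j in range(k)])
    return centroids, labels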
End of explanation
"""
kmeans.plot_data(initial_centroids, data, n_samples)
curr_centroids = tf.Variable(initial_centroids)
nearest_indices = k.assign_to_nearest(curr_centroids)
updated_centroids = k.update_centroids(nearest_indices)
tf.global_variables_initializer().run()
"""
Explanation: Initial "random" guesses, based on the data.
End of explanation
"""
kmeans.plot_data(updated_centroids.eval(), data, n_samples)
curr_centroids.assign(updated_centroids)
with tf.Session().as_default(): new_centroids = k.run()
kmeans.plot_data(new_centroids, data, n_samples)
"""
Explanation: Updated centroids after one iteration.
End of explanation
"""
def gaussian(d, bw):
return np.exp(-0.5*((d/bw))**2) / (bw*math.sqrt(2*math.pi))
"""
Explanation: The result is a set of centroids that minimizes the total distance between all points and their centroids; the centroids are "optimal" in this sense.
There are some problems with K-means clustering.
* Number of clusters needs to be known a priori
* This is an obvious failure of an unsupervised learning algorithm; we want the data to "speak for itself"
* Difficult to identify in higher dimensions
* Naive approach only works if the clusters are the same shape
  * This is because the centroid is identified using Euclidean distance
Mean shift
Mean shift clustering is a newer and less well-known approach.
The algorithm is as follows:
* Take each data point X
* For each x in X, find the distance between point x and every other point in X
* Create weights for each point in X by using the Gaussian function of that point's distance to x
* Gaussian function here is the density function of a Normal distribution of distances with mean 0
* This weighting approach penalizes points further away from x
* The rate at which the weights fall to zero is determined by the bandwidth, which is the standard deviation of the Gaussian
* Update x as the weighted average of all other points in X, defined by the weights determined in the previous step
This will iteratively push points that are close together even closer until they are next to each other.
End of explanation
"""
def meanshift(data):
X = np.copy(data)
for it in range(5):
for i, x in enumerate(X):
dist = np.sqrt(((x-X)**2).sum(1))
weight = gaussian(dist, 2.5)
X[i] = (np.expand_dims(weight,1)*X).sum(0) / weight.sum()
return X
%time X=meanshift(data)
"""
Explanation: In our implementation, we choose the bandwidth to be 2.5.
One easy way to choose the bandwidth is to find a value that covers roughly one third of the data.
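For instance, one rough way to find such a value (an assumption on our part, not part of the lesson) is to look at the pairwise distances in a sample of the data and take their one-third percentile:

sample = data[np.random.choice(len(data), 500, replace=False)]
pairwise = np.sqrt(((sample[:, None] - sample[None, :])**2).sum(-1))
bw_guess = np.percentile(pairwise, 100/3)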
End of explanation
"""
kmeans.plot_data(centroids+2, X, n_samples)
"""
Explanation: We can see that mean shift clustering has almost reproduced our original clustering. The one exception is the pair of very close clusters, but if we really wanted to differentiate them we could lower the bandwidth.
What is impressive is that this algorithm nearly reproduced the original clusters without telling it how many clusters there should be.
End of explanation
"""
import torch_utils; reload(torch_utils)
from torch_utils import *
"""
Explanation: We should be able to accelerate this algorithm with a GPU.
PyTorch
GPU-accelerated mean shift implementation in pytorch
End of explanation
"""
def gaussian(d, bw):
return torch.exp(-0.5*((d/bw))**2) / (bw*math.sqrt(2*math.pi))
"""
Explanation: The advantage of pytorch is that it's very similar to numpy.
End of explanation
"""
def meanshift(data):
X = torch.FloatTensor(np.copy(data))
for it in range(5):
for i, x in enumerate(X):
dist = torch.sqrt((sub(x, X)**2).sum(1))
weight = gaussian(dist, 3)
num = mul(weight, X).sum(0)
X[i] = num / weight.sum()
return X
"""
Explanation: Torch does not support broadcasting, so Jeremy has replaced the distance subtraction line with a subtraction function from his custom pytorch broadcasting library.
End of explanation
"""
%time X = meanshift(data).numpy()
"""
Explanation: This implementation actually takes longer.
End of explanation
"""
kmeans.plot_data(centroids+2, X, n_samples)
"""
Explanation: All the computation is happening in the <tt>for</tt> loop, which isn't accelerated by pytorch.
Each iteration launches a new cuda kernel, which takes time and slows the algorithm down as a whole.
End of explanation
"""
def dist_b(a,b):
return torch.sqrt((sub(a.unsqueeze(0),b.unsqueeze(1))**2).sum(2))
a=torch.rand(2,2)
b=torch.rand(3,2)
dist_b(b, a).squeeze(2)
def gaussian(d, bw):
return torch.exp(-0.5*((d/bw))**2) / (bw*math.sqrt(2*math.pi))
def sum_sqz(a,axis): return a.sum(axis).squeeze(axis)
def meanshift(data, bs=500):
n = len(data)
X = torch.FloatTensor(np.copy(data)).cuda()
for it in range(5):
for i in range(0,n,bs):
s = slice(i,min(n,i+bs))
weight = gaussian(dist_b(X, X[s]), 2)
num = sum_sqz(mul(weight, X), 1)
X[s] = div(num, sum_sqz(weight, 1))
return X
"""
Explanation: GPU
To truly accelerate the algorithm, we need to be performing updates on a batch of points per iteration, instead of just one as we were doing.
End of explanation
"""
%time X = meanshift(data).cpu().numpy()
kmeans.plot_data(centroids+2, X, n_samples)
"""
Explanation: Although each iteration still has to launch a new cuda kernel, there are now fewer iterations, and the acceleration from updating a batch of points more than makes up for it.
End of explanation
"""
from sklearn.neighbors import LSHForest, KDTree, BallTree
n_clusters=6
n_samples =2500
centroids = np.random.uniform(-35, 35, (n_clusters, 2))
slices = [np.random.multivariate_normal(centroids[i], np.diag([5., 5.]), n_samples)
for i in range(n_clusters)]
data = np.concatenate(slices).astype(np.float32)
nn = KDTree(data)
nearest = nn.query(data[:10], 3, False); nearest
nn = BallTree(data)
nearest = nn.query(data[:10], 3, False); nearest
kmeans.plot_data(centroids, data, n_samples)
def index_b(a,idxs):
ir, ic = idxs.size()
ar, ac = a.size()
return a[idxs.view(ir*ic)].view(ir,ic,ac)
a = FT([[1,2],[3.,4],[5,6]])
b = torch.LongTensor([[0,1], [1,2]])
exp = FT([[[1,2], [3,4.]], [[3,4], [5,6]]])
assert(torch.equal(index_b(a,b), exp))
def dist_b_n(a,b,pts):
dists = sub(pts,b.unsqueeze(1))**2
return torch.sqrt(dists.sum(2))
def meanshift(data, bs=512):
n = len(data)
X = torch.FloatTensor(np.copy(data)).cuda()
for it in range(5):
d = X.cpu().numpy()
nn = BallTree(d)
for i in range(0,n,bs):
s = slice(i,min(n,i+bs))
nearest = torch.LongTensor(nn.query(d[s], 50, False)).cuda()
pts = index_b(X, nearest)
weight = gaussian(dist_b_n(X, X[s], pts), 2)
num = sum_sqz(mul(weight, pts), 1)
X[s] = div(num, sum_sqz(weight, 1))
return X
%time data = meanshift(data).cpu().numpy()
kmeans.plot_data(centroids+1, data, n_samples)
"""
Explanation: LSH
TO-DO: Needs notes? The code below speeds up mean shift by querying each point's 50 nearest neighbours with a BallTree, so each weighted update only considers nearby points rather than the full dataset.
End of explanation
"""
|
fzotter/Ambisonic-Jupyter-Notebook | 02-rErVofVBAPandVBIPandMDAPonCircle.ipynb | mit | import numpy as np
from numpy.linalg import inv
def vectorpan(xys,xyls,simplices):
g=np.zeros(phils.shape[0])
for n in range(0,simplices.shape[0]):
na=simplices[n,0]
nb=simplices[n,1]
M=np.array([xyls[:,na],xyls[:,nb]]).T
gnm=np.dot(inv(M),xys)
if np.sum(gnm<-1e-3)==0:
g[na]=gnm[0]
g[nb]=gnm[1]
break
return g
"""
Explanation: rE and rV measures for 2D Vector-Base Amplitude Panning (VBAP,VBIP,MDAP)
Acoustic Holography and Holophony
Franz Zotter, 2016.
The examples below show the directions and widths indicated by the $\boldsymbol{r}_\mathrm{V}$ and $\boldsymbol{r}_\mathrm{E}$ measures for pairwise vector-base amplitude panning along the horizon.
Assume that you are given the desired panning direction as
\begin{equation}
\boldsymbol\theta=\begin{bmatrix}
\cos\varphi_\mathrm{s}\\
\sin\varphi_\mathrm{s}
\end{bmatrix}
\end{equation}
and the coordinates of your loudspeakers as
\begin{equation}
\mathbf{X}=\begin{bmatrix}
\cos\varphi_1, &\dots, &\cos\varphi_\mathrm{L}\\
\sin\varphi_1, &\dots, &\sin\varphi_\mathrm{L}
\end{bmatrix}.
\end{equation}
Then VBAP searches the loudspeaker pair that encloses the direction $\boldsymbol\theta$. This is done by constructing the convex hull, which defines the lines (simplices) connecting all the loudspeaker pairs by the loudspeaker indices involved. For a ring of $\mathrm{L}$ loudspeakers with loudspeakers sorted in azimuth, it looks like this
\begin{equation}
Q=\mathrm{convhull}\{\mathbf{X}\}=\begin{bmatrix}
1& 2\\
2& 3\\
\vdots &\vdots\\
a& b\\
\vdots &\vdots\\
\mathrm{L}-1& \mathrm{L}\\
\mathrm{L} & 1
\end{bmatrix}.
\end{equation}
Now VBAP searches through the convex hull to find the loudspeaker pair (the simplex of indices $a,b$) whose weighted superposition of direction vectors yields a solution with numerically all-positive weights $\boldsymbol{g}_q=[g_a,g_b]$, i.e. $g_a,g_b>0$, to represent $\boldsymbol\theta$:
<blockquote>
<p>$\boldsymbol g=\boldsymbol 0$ with length $\mathrm{L}$
<p>For all $q$ indexing through all pairs:</p>
<blockquote>
<p>
$\boldsymbol{g}_q=\mathbf{X}[Q[q,:]]^{-1}\boldsymbol{\theta}$
</p>
<p>
if $g_a,g_b>-10^{-3}$:
</p>
<blockquote>
<p>$\boldsymbol{g}[[a,b],:]=\boldsymbol{g}_q$</p>
<p>break loop</p>
</blockquote>
</blockquote>
</blockquote>
This algorithm is defined below.
End of explanation
"""
def r_vector(g,theta):
L=theta.size
thx=np.sin(theta[:])
thy=np.cos(theta[:])
rx=np.dot(thx,g).T
ry=np.dot(thy,g).T
normalizer=sum(g,0)
rx/=normalizer
ry/=normalizer
return np.array([rx,ry])
"""
Explanation: Vector measures for arbitrarily many loudspeakers
The vector measures are easily extended to more than 2 loudspeakers, i.e., to
\begin{equation}
\boldsymbol{r}_\mathrm{V}=
\frac{\sum_{l=1}^\mathrm{L}g_l\,\boldsymbol\theta_l}{\sum_{l=1}^\mathrm{L}g_l},
\qquad\qquad
\boldsymbol{r}_\mathrm{E}=
\frac{\sum_{l=1}^\mathrm{L}g_l^2\,\boldsymbol\theta_l}{\sum_{l=1}^\mathrm{L}g_l^2}.
\end{equation}
End of explanation
"""
from scipy.spatial import ConvexHull
from bokeh.plotting import figure, output_file, show
from bokeh.io import output_notebook
L=5
Npts=200
phils=np.arange(0,L)*2*np.pi/L
xyls=np.array([np.cos(phils),np.sin(phils)])
phis=np.linspace(-np.pi*0.99,np.pi,Npts)
xys=np.array([np.cos(phis),np.sin(phis)])
g=np.zeros([L,Npts])
qh=ConvexHull(xyls.T)
for n in range(0,Npts):
gn=vectorpan(xys[:,n],xyls,qh.simplices)
gn=np.abs(gn)
gn/=np.sqrt(np.sum(gn))
g[:,n]=gn
output_notebook()
p1=figure(title="r vector direction VBAP",plot_width=300,plot_height=250)
p2=figure(title="r vector width VBAP",plot_width=300,plot_height=250,x_range=[-180,180],y_range=[0,100])
r=r_vector(g,phils)
mr=np.sqrt(np.sum(r**2,0))
p1.line(phis*180/np.pi,np.arctan2(r[0,:],r[1,:])*180/np.pi,legend_label="rV",line_width=2)
p2.line(phis*180/np.pi,2*np.arccos(mr)*180/np.pi,legend_label="2acos||rV||",line_width=2)
r=r_vector(g**2,phils)
mr=np.sqrt(np.sum(r**2,0))
p1.line(phis*180/np.pi,np.arctan2(r[0,:],r[1,:])*180/np.pi,legend_label="rE",color="red",line_width=2)
p2.line(phis*180/np.pi,2*np.arccos(mr)*180/np.pi,legend_label="2acos||rE||",color="red",line_width=2)
p1.legend.location="top_left"
p2.legend.background_fill_alpha = 0.5
show(p1)
show(p2)
"""
Explanation: This vector model is now applied to an example using 5 equally spaced loudspeakers on the horizon, panning over all directions from $-180^\circ\dots180^\circ$. It again shows the directions and widths indicated by $\boldsymbol r_\mathrm{V,E}$.
End of explanation
"""
for n in range(0,Npts):
gn=vectorpan(xys[:,n],xyls,qh.simplices)
gn=np.abs(gn)
# new line inserted just for VBIP:
gn=np.sqrt(gn)
gn/=np.sqrt(np.sum(gn))
g[:,n]=gn
p3=figure(title="r vector direction VBIP",plot_width=300,plot_height=250)
p4=figure(title="r vector width VBIP",plot_width=300,plot_height=250,x_range=[-180,180],y_range=[0,100])
r=r_vector(g,phils)
mr=np.sqrt(np.sum(r**2,0))
p3.line(phis*180/np.pi,np.arctan2(r[0,:],r[1,:])*180/np.pi,legend_label="rV",line_width=2)
p4.line(phis*180/np.pi,2*np.arccos(mr)*180/np.pi,legend_label="2acos||rV||",line_width=2)
r=r_vector(g**2,phils)
mr=np.sqrt(np.sum(r**2,0))
p3.line(phis*180/np.pi,np.arctan2(r[0,:],r[1,:])*180/np.pi,legend_label="rE",color="red",line_width=2)
p4.line(phis*180/np.pi,2*np.arccos(mr)*180/np.pi,legend_label="2acos||rE||",color="red",line_width=2)
p3.legend.location="top_left"
p4.legend.background_fill_alpha = 0.5
show(p3)
show(p4)
"""
Explanation: It seems that the vectors do not indicate the same directions: only the $\boldsymbol r_\mathrm{V}$ vector is perfectly controlled. As an alternative to VBAP, one can use VBIP, which simply takes the square roots of the panning gains obtained from VBAP before normalization.
End of explanation
"""
alpha=phils[1]-phils[0]
xys2=np.array([np.cos(phis-alpha/2),np.sin(phis-alpha/2)])
xys3=np.array([np.cos(phis+alpha/2),np.sin(phis+alpha/2)])
for n in range(0,Npts):
gn=vectorpan(xys[:,n],xyls,qh.simplices)
# lines inserted for MDAP
gn+=vectorpan(xys2[:,n],xyls,qh.simplices)
gn+=vectorpan(xys3[:,n],xyls,qh.simplices)
gn=np.abs(gn)
gn/=np.sqrt(np.sum(gn))
g[:,n]=gn
p5=figure(title="r vector direction MDAP",plot_width=300,plot_height=250)
p6=figure(title="r vector width MDAP",plot_width=300,plot_height=250,x_range=[-180,180],y_range=[0,100])
r=r_vector(g,phils)
mr=np.sqrt(np.sum(r**2,0))
p5.line(phis*180/np.pi,np.arctan2(r[0,:],r[1,:])*180/np.pi,legend_label="rV",line_width=2)
p6.line(phis*180/np.pi,2*np.arccos(mr)*180/np.pi,legend_label="2acos||rV||",line_width=2)
r=r_vector(g**2,phils)
mr=np.sqrt(np.sum(r**2,0))
p5.line(phis*180/np.pi,np.arctan2(r[0,:],r[1,:])*180/np.pi,legend_label="rE",color="red",line_width=2)
p6.line(phis*180/np.pi,2*np.arccos(mr)*180/np.pi,legend_label="2acos||rE||",color="red",line_width=2)
p5.legend.location="top_left"
p6.legend.background_fill_alpha = 0.5
show(p5)
show(p6)
"""
Explanation: Now the $\boldsymbol r_\mathrm{E}$ vector is perfectly controlled and the widths tend to have narrower notches.
Multi-Direction Amplitude Panning (MDAP)
MDAP does the same as VBAP, but it always superimposes several sources to obtain the panning gains. One could describe it as
\begin{equation}
\boldsymbol{\tilde g}=\mathrm{VBAP}{\varphi_\mathrm{s}-\textstyle\frac{\alpha}{2},\mathbf{X}}+\mathrm{VBAP}{\varphi_\mathrm{s},\mathbf{X}}+\mathrm{VBAP}{\varphi_\mathrm{s}+\textstyle\frac{\alpha}{2},\mathbf{X}}\qquad\qquad
\boldsymbol g=\frac{\boldsymbol{\tilde g}}{\|\boldsymbol{\tilde g}\|}
\end{equation}
End of explanation
"""
|
ehongdata/Network-Analysis-Made-Simple | 3. Hubs and Paths (Student).ipynb | mit | # Let's find out the number of neighbors that individual #7 has.
G.neighbors(7)
"""
Explanation: Hubs: How do we evaluate the importance of some individuals in a network?
Within a social network, there will be certain individuals who perform important functions. For example, there may be hyper-connected individuals who are connected to many, many more people. They would be useful in the spreading of information. Alternatively, if this were a disease contact network, identifying them would be useful in stopping the spread of diseases. How would one identify these people?
Approach 1: Neighbors
One way we could compute this is to find out the number of people an individual is connected to. NetworkX lets us do this with the G.neighbors(node) function.
End of explanation
"""
nx.degree_centrality(G)
"""
Explanation: Exercise
Can you create a ranked list of the importance of each individual, based on the number of neighbors they have?
Hint: One suggested output would be a list of tuples, where the first element in each tuple is the node ID (an integer number), and the second element is a list of its neighbors.
Hint: Python's sorted(iterable, key=lambda x:...., reverse=True) function may be of help here.
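One possible pattern that matches the hint (just a sketch):

sorted([(n, list(G.neighbors(n))) for n in G.nodes()],
       key=lambda pair: len(pair[1]), reverse=True)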
Approach 2: Degree Centrality
The number of other nodes that one node is connected to is a measure of its centrality. NetworkX implements a degree centrality, which is defined as the number of neighbors that a node has normalized to the number of individuals it could be connected to in the entire graph. This is accessed by using nx.degree_centrality(G)
End of explanation
"""
# Your answer here.
"""
Explanation: If you inspect the dictionary closely, you will find that node 19 is the one that has the highest degree centrality, just as we had measured by counting the number of neighbors.
There are other measures of centrality, namely betweenness centrality, flow centrality and load centrality. You can take a look at their definitions on the NetworkX API docs and their cited references. You can also define your own measures if those don't fit your needs, but that is an advanced topic that won't be dealt with here.
The NetworkX API docs that document the centrality measures are here: http://networkx.github.io/documentation/networkx-1.9.1/reference/algorithms.centrality.html
Exercises
Can you create a histogram of the distribution of degree centralities? (1-2 min)
Can you create a histogram of the distribution of number of neighbors? (1-2 min)
Can you create a scatterplot of the degree centralities against number of neighbors? (1-2 min)
If I have n nodes, then how many possible edges are there in total, assuming self-edges are allowed? What if self-edges are not allowed?
Time: 3-6 min.
Hint: You may want to use:
plt.hist(list_of_values)
and
plt.scatter(x_values, y_values)
If you know the Matplotlib API, feel free to get fancy :).
End of explanation
"""
nx.draw(G, with_labels=True)
"""
Explanation: Paths in a Network
Graph traversal is akin to walking along the graph, node by node, restricted by the edges that connect the nodes. Graph traversal is particularly useful for understanding the local structure (e.g. connectivity, retrieving the exact relationships) of certain portions of the graph and for finding paths that connect two nodes in the network.
Using the synthetic social network, we will figure out how to answer the following questions:
How long will it take for a message to spread through this group of friends? (making some assumptions, of course)
How do we find the shortest path to get from individual A to individual B?
Shortest Path
End of explanation
"""
def path_exists(node1, node2, G):
"""
This function checks whether a path exists between two nodes (node1, node2) in graph G.
"""
"""
Explanation: Let's say we wanted to find the shortest path between two nodes. How would we approach this? One approach is what one would call a breadth-first search (http://en.wikipedia.org/wiki/Breadth-first_search). While not necessarily the fastest, it is the easiest to conceptualize.
The approach is essentially as such:
1. Begin with a queue of the starting node.
2. Add the neighbors of that node to the queue.
    - If destination node is present in the queue, end.
    - If destination node is not present, proceed.
3. For each node in the queue:
    - Remove node from the queue.
    - Add neighbors of the node to the queue. Check if destination node is present or not.
    - If destination node is present, break.
    - If destination node is not present, repeat step 3.
Exercise
Try implementing this algorithm in a function called path_exists(node1, node2, G).
The function should take in two nodes, node1 and node2, and the graph G that they belong to, and return a Boolean that indicates whether a path exists between those two nodes or not.
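If you get stuck, one possible answer (a sketch that follows the steps above) looks like this:

def path_exists(node1, node2, G):
    queue = [node1]
    visited = set()
    while queue:
        node = queue.pop(0)                  # remove a node from the front of the queue
        if node == node2:
            return True                      # destination found
        if node not in visited:
            visited.add(node)
            queue.extend(G.neighbors(node))  # add its neighbors to the queue
    return False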
End of explanation
"""
path_exists(18, 5, G)
path_exists(29, 26, G)
"""
Explanation: And testing the function on a few test cases:
18 and any other node (should return False)
29 and 26 (should return True)
End of explanation
"""
nx.has_path(G, 18, 5)
"""
Explanation: Meanwhile... thankfully, NetworkX has a function for us to use, titled has_path, so we don't have to always implement this on our own. :-)
http://networkx.lanl.gov/reference/generated/networkx.algorithms.shortest_paths.generic.has_path.html#networkx.algorithms.shortest_paths.generic.has_path
End of explanation
"""
nx.draw(G, with_labels=True)
"""
Explanation: NetworkX also has other shortest path algorithms implemented.
https://networkx.github.io/documentation/latest/reference/generated/networkx.algorithms.shortest_paths.unweighted.predecessor.html#networkx.algorithms.shortest_paths.unweighted.predecessor
We can build upon these to build our own graph query functions. Let's see if we can trace the shortest path from one node to another.
End of explanation
"""
nx.shortest_path(G, 4, 14)
"""
Explanation: nx.shortest_path(G, source, target) gives us a list of nodes that exist within one of the shortest paths between the two nodes. (Not all paths are guaranteed to be found.)
End of explanation
"""
# Possible Answer:
def extract_path_edges(G, source, target):
"""
Fill in the code below.
"""
# Test your function with the following block of code.
newG = extract_path_edges(G, 1, 14)
nx.draw(newG, with_labels=True)
"""
Explanation: Incidentally, the node list is in order as well - we will travel through 19 and 17 in that order to get from 4 to 14.
Exercise
Write a function that extracts the edges in the shortest path between two nodes and puts them into a new graph, and draws it to the screen. It should also return an error if there is no path between the two nodes. (~5 min)
Hint: You may want to use G.subgraph(iterable_of_nodes) to extract just the nodes and edges of interest from the graph G. One coding pattern to consider is this:
newG = G.subgraph(nodes_of_interest)
newG will be comprised of the nodes of interest and the edges that connect them.
End of explanation
"""
# Possible Answer
def extract_neighbor_edges(G, node):
"""
Fill in code below.
"""
# Test your function with the following block of code.
fig = plt.figure(0)
newG = extract_neighbor_edges(G, 19)
nx.draw(newG, with_labels=True)
"""
Explanation: Exercise
Since we've been drawing some graphs to screen, we might as well draw a few other things while we're on a roll.
Write a function that extracts only a given node, its neighbors, and the edges between that node and its neighbors as a new graph. Then, draw the new graph to screen. (~5 min.)
End of explanation
"""
# Your answer to Question 1:
# All we need here is the length of the path.
def compute_transmission_time(G, source, target):
"""
Fill in code below.
"""
# Test with the following line of code.
compute_transmission_time(G, 14, 4)
# Your answer to Question 2:
# We need to know the length of every single shortest path between every pair of nodes.
# If we don't put a source and target into the nx.shortest_path_length(G) function call, then
# we get a dictionary of dictionaries, where all source-->target-->lengths are shown.
# Your answer to Question 3:
# You may want to use the Counter object from collections, as well as combinations from itertools.
from collections import Counter
from itertools import combinations
# Your answer to Question 4:
# Hint: You may want to use bar graphs or histograms.
plt.bar(totals.keys(), totals.values())
"""
Explanation: Challenge Exercises (optional)
Let's try some other problems that build on the NetworkX API. (10 min.)
Refer to the following for the relevant functions:
https://networkx.github.io/documentation/latest/reference/algorithms.shortest_paths.html
If we want a message to go from one person to another person, and we assume that the message takes 1 day for the initial step and 1 additional day per step in the transmission chain (i.e. the first step takes 1 day, the second step takes 2 days etc.), how long will the message take to spread from any two given individuals? Write a function to compute this.
What is the distribution of message spread times from person to person? What about chain lengths?
Are there certain individuals who consistently show up in the chain? (Hint: you might wish to use the following functions/objects:
Counter object from the collections module
combinations function from the itertools module.
all_shortest_paths(G, node1, node2) which is part of the networkX algorithms.
As a bonus, if you were able to compute the answer to question 3, can you plot a histogram of the number of times each node shows up in a connecting path?
End of explanation
"""
btws = nx.betweenness_centrality(G, normalized=False)
plt.bar(btws.keys(), btws.values())
"""
Explanation: Hubs Revisited
It looks like individual 19 is an important person of some sort - if a message has to be passed through the network in the shortest time possible, it will usually go through person 19. Such a person has a high betweenness centrality. This is implemented as one of NetworkX's centrality algorithms. Check out the Wikipedia page for a further description.
http://en.wikipedia.org/wiki/Betweenness_centrality
End of explanation
"""
nx.draw(nx.barbell_graph(5, 1))
"""
Explanation: Exercise
Plot betweenness centrality against degree centrality for the synthetic social network above.
Think about it...
From the scatter plot, we can see that the dots don't all fall on the same line. Degree centrality and betweenness centrality don't necessarily correlate. Can you think of a reason why?
What would be the degree centrality and betweenness centrality of the middle connecting node in the barbell graph below?
End of explanation
"""
|
Kaggle/learntools | notebooks/feature_engineering/raw/ex4.ipynb | apache-2.0 | # Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.feature_engineering.ex4 import *
"""
Explanation: Introduction
In this exercise you'll use some feature selection algorithms to improve your model. Some methods take a while to run, so you'll write functions and verify they work on small samples.
To begin, run the code cell below to set up the exercise.
End of explanation
"""
import numpy as np
import pandas as pd
from sklearn import preprocessing, metrics
import lightgbm as lgb
import os
clicks = pd.read_parquet('../input/feature-engineering-data/baseline_data.pqt')
data_files = ['count_encodings.pqt',
'catboost_encodings.pqt',
'interactions.pqt',
'past_6hr_events.pqt',
'downloads.pqt',
'time_deltas.pqt',
'svd_encodings.pqt']
data_root = '../input/feature-engineering-data'
for file in data_files:
features = pd.read_parquet(os.path.join(data_root, file))
clicks = clicks.join(features)
def get_data_splits(dataframe, valid_fraction=0.1):
dataframe = dataframe.sort_values('click_time')
valid_rows = int(len(dataframe) * valid_fraction)
train = dataframe[:-valid_rows * 2]
# valid size == test size, last two sections of the data
valid = dataframe[-valid_rows * 2:-valid_rows]
test = dataframe[-valid_rows:]
return train, valid, test
def train_model(train, valid, test=None, feature_cols=None):
if feature_cols is None:
feature_cols = train.columns.drop(['click_time', 'attributed_time',
'is_attributed'])
dtrain = lgb.Dataset(train[feature_cols], label=train['is_attributed'])
dvalid = lgb.Dataset(valid[feature_cols], label=valid['is_attributed'])
param = {'num_leaves': 64, 'objective': 'binary',
'metric': 'auc', 'seed': 7}
num_round = 1000
print("Training model!")
bst = lgb.train(param, dtrain, num_round, valid_sets=[dvalid],
early_stopping_rounds=20, verbose_eval=False)
valid_pred = bst.predict(valid[feature_cols])
valid_score = metrics.roc_auc_score(valid['is_attributed'], valid_pred)
print(f"Validation AUC score: {valid_score}")
if test is not None:
test_pred = bst.predict(test[feature_cols])
test_score = metrics.roc_auc_score(test['is_attributed'], test_pred)
return bst, valid_score, test_score
else:
return bst, valid_score
"""
Explanation: Then run the following cell. It takes a minute or so to run.
End of explanation
"""
train, valid, test = get_data_splits(clicks)
_, baseline_score = train_model(train, valid)
"""
Explanation: Baseline Score
Let's look at the baseline score for all the features we've made so far.
End of explanation
"""
# Check your answer (Run this code cell to receive credit!)
q_1.solution()
"""
Explanation: 1) Which data to use for feature selection?
Since many feature selection methods require calculating statistics from the dataset, should you use all the data for feature selection?
Run the following line after you've decided your answer.
End of explanation
"""
from sklearn.feature_selection import SelectKBest, f_classif
feature_cols = clicks.columns.drop(['click_time', 'attributed_time', 'is_attributed'])
train, valid, test = get_data_splits(clicks)
# Create the selector, keeping 40 features
selector = ____
# Use the selector to retrieve the best features
X_new = ____
# Get back the kept features as a DataFrame with dropped columns as all 0s
selected_features = ____
# Find the columns that were dropped
dropped_columns = ____
# Check your answer
q_2.check()
# Uncomment these lines if you need some guidance
# q_2.hint()
# q_2.solution()
#%%RM_IF(PROD)%%
feature_cols = clicks.columns.drop(['click_time', 'attributed_time', 'is_attributed'])
train, valid, test = get_data_splits(clicks)
# Do feature extraction on the training data only!
selector = SelectKBest(f_classif, k=40)
X_new = selector.fit_transform(train[feature_cols], train['is_attributed'])
# Get back the features we've kept, zero out all other features
selected_features = pd.DataFrame(selector.inverse_transform(X_new),
index=train.index,
columns=feature_cols)
# Dropped columns have values of all 0s, so var is 0, drop them
dropped_columns = selected_features.columns[selected_features.var() == 0]
q_2.assert_check_passed()
_ = train_model(train.drop(dropped_columns, axis=1),
valid.drop(dropped_columns, axis=1))
"""
Explanation: Now we have 91 features we're using for predictions. With all these features, there is a good chance the model is overfitting the data. We might be able to reduce the overfitting by removing some features. Of course, the model's performance might decrease. But at least we'd be making the model smaller and faster without losing much performance.
2) Univariate Feature Selection
Below, use SelectKBest with the f_classif scoring function to choose 40 features from the 91 features in the data.
End of explanation
"""
# Check your answer (Run this code cell to receive credit!)
q_3.solution()
"""
Explanation: 3) The best value of K
With this method we can choose the best K features, but we still have to choose K ourselves. How would you find the "best" value of K? That is, you want it to be small so you're keeping the best features, but not so small that it's degrading the model's performance.
Run the following line after you've decided your answer.
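One way to explore this (a rough sketch that reuses the SelectKBest pattern and the train_model helper from above; it is slow on the full data, so you may want to subsample first) is to sweep over several values of K and compare validation scores:

results = {}
for k in [10, 20, 40, 60, 80]:
    selector = SelectKBest(f_classif, k=k)
    X_new = selector.fit_transform(train[feature_cols], train['is_attributed'])
    selected = pd.DataFrame(selector.inverse_transform(X_new),
                            index=train.index, columns=feature_cols)
    dropped = selected.columns[selected.var() == 0]
    _, score = train_model(train.drop(dropped, axis=1), valid.drop(dropped, axis=1))
    results[k] = score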
End of explanation
"""
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectFromModel
def select_features_l1(X, y):
"""Return selected features using logistic regression with an L1 penalty."""
____
return ____
# Check your answer
q_4.check()
# Uncomment these if you're feeling stuck
#q_4.hint()
#q_4.solution()
#%%RM_IF(PROD)%%
def select_features_l1(X, y):
logistic = LogisticRegression(C=0.1, penalty="l1", solver='liblinear', random_state=7).fit(X, y)
model = SelectFromModel(logistic, prefit=True)
X_new = model.transform(X)
# Get back the kept features as a DataFrame with dropped columns as all 0s
selected_features = pd.DataFrame(model.inverse_transform(X_new),
index=X.index,
columns=X.columns)
# Dropped columns have values of all 0s, keep other columns
cols_to_keep = selected_features.columns[selected_features.var() != 0]
return cols_to_keep
feature_cols = clicks.columns.drop(['click_time', 'attributed_time', 'is_attributed'])
train, valid, test = get_data_splits(clicks)
X, y = train[feature_cols][:10000], train['is_attributed'][:10000]
selected = select_features_l1(X, y)
q_4.assert_check_passed()
n_samples = 10000
X, y = train[feature_cols][:n_samples], train['is_attributed'][:n_samples]
selected = select_features_l1(X, y)
dropped_columns = feature_cols.drop(selected)
_ = train_model(train.drop(dropped_columns, axis=1),
valid.drop(dropped_columns, axis=1))
"""
Explanation: 4) Use L1 regularization for feature selection
Now try a more powerful approach using L1 regularization. Implement a function select_features_l1 that returns a list of features to keep.
Use a LogisticRegression classifier model with an L1 penalty to select the features. For the model, set:
- the random state to 7,
- the regularization parameter to 0.1,
- and the solver to 'liblinear'.
Fit the model then use SelectFromModel to return a model with the selected features.
The checking code will run your function on a sample from the dataset to provide more immediate feedback.
End of explanation
"""
# Check your answer (Run this code cell to receive credit!)
q_5.solution()
"""
Explanation: 5) Feature Selection with Trees
Since we're using a tree-based model, using another tree-based model for feature selection might produce better results. What would you do differently to select the features using a tree-based classifier?
Run the following line after you've decided your answer.
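One possible sketch (an assumption, not the official solution): swap the L1 logistic regression for a tree ensemble and let SelectFromModel use its feature_importances_:

from sklearn.ensemble import ExtraTreesClassifier

def select_features_trees(X, y):
    clf = ExtraTreesClassifier(n_estimators=100, random_state=7).fit(X, y)
    model = SelectFromModel(clf, prefit=True, threshold='median')
    X_new = model.transform(X)
    selected = pd.DataFrame(model.inverse_transform(X_new),
                            index=X.index, columns=X.columns)
    return selected.columns[selected.var() != 0]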
End of explanation
"""
# Check your answer (Run this code cell to receive credit!)
q_6.solution()
"""
Explanation: 6) Top K features with L1 regularization
Here you've set the regularization parameter C=0.1, which led to some number of features being dropped. However, by setting C you can't choose exactly how many features to keep. What would you do to keep the top K most important features using L1 regularization?
Run the following line after you've decided your answer.
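One sketch of an approach (an assumption): fit the same L1 model, then rank the features by the absolute size of their coefficients and keep the top K yourself:

logistic = LogisticRegression(C=0.1, penalty="l1", solver='liblinear',
                              random_state=7).fit(X, y)
coef_magnitude = np.abs(logistic.coef_).ravel()
top_k_features = X.columns[np.argsort(coef_magnitude)[-40:]]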
End of explanation
"""
|
statsmodels/statsmodels.github.io | v0.13.1/examples/notebooks/generated/interactions_anova.ipynb | bsd-3-clause | %matplotlib inline
from urllib.request import urlopen
import numpy as np
np.set_printoptions(precision=4, suppress=True)
import pandas as pd
pd.set_option("display.width", 100)
import matplotlib.pyplot as plt
from statsmodels.formula.api import ols
from statsmodels.graphics.api import interaction_plot, abline_plot
from statsmodels.stats.anova import anova_lm
try:
salary_table = pd.read_csv("salary.table")
except: # recent pandas can read URL without urlopen
url = "http://stats191.stanford.edu/data/salary.table"
fh = urlopen(url)
salary_table = pd.read_table(fh)
salary_table.to_csv("salary.table")
E = salary_table.E
M = salary_table.M
X = salary_table.X
S = salary_table.S
"""
Explanation: Interactions and ANOVA
Note: This script is based heavily on Jonathan Taylor's class notes https://web.stanford.edu/class/stats191/notebooks/Interactions.html
Download and format data:
End of explanation
"""
plt.figure(figsize=(6, 6))
symbols = ["D", "^"]
colors = ["r", "g", "blue"]
factor_groups = salary_table.groupby(["E", "M"])
for values, group in factor_groups:
i, j = values
plt.scatter(group["X"], group["S"], marker=symbols[j], color=colors[i - 1], s=144)
plt.xlabel("Experience")
plt.ylabel("Salary")
"""
Explanation: Take a look at the data:
End of explanation
"""
formula = "S ~ C(E) + C(M) + X"
lm = ols(formula, salary_table).fit()
print(lm.summary())
"""
Explanation: Fit a linear model:
End of explanation
"""
lm.model.exog[:5]
"""
Explanation: Have a look at the created design matrix:
End of explanation
"""
lm.model.data.orig_exog[:5]
"""
Explanation: Or since we initially passed in a DataFrame, we have a DataFrame available in
End of explanation
"""
lm.model.data.frame[:5]
"""
Explanation: We keep a reference to the original untouched data in
End of explanation
"""
infl = lm.get_influence()
print(infl.summary_table())
"""
Explanation: Influence statistics
End of explanation
"""
df_infl = infl.summary_frame()
df_infl[:5]
"""
Explanation: or get a dataframe
End of explanation
"""
resid = lm.resid
plt.figure(figsize=(6, 6))
for values, group in factor_groups:
i, j = values
group_num = i * 2 + j - 1 # for plotting purposes
x = [group_num] * len(group)
plt.scatter(
x,
resid[group.index],
marker=symbols[j],
color=colors[i - 1],
s=144,
edgecolors="black",
)
plt.xlabel("Group")
plt.ylabel("Residuals")
"""
Explanation: Now plot the residuals within the groups separately:
End of explanation
"""
interX_lm = ols("S ~ C(E) * X + C(M)", salary_table).fit()
print(interX_lm.summary())
"""
Explanation: Now we will test some interactions using anova or f_test
End of explanation
"""
from statsmodels.stats.api import anova_lm
table1 = anova_lm(lm, interX_lm)
print(table1)
interM_lm = ols("S ~ X + C(E)*C(M)", data=salary_table).fit()
print(interM_lm.summary())
table2 = anova_lm(lm, interM_lm)
print(table2)
"""
Explanation: Do an ANOVA check
End of explanation
"""
interM_lm.model.data.orig_exog[:5]
"""
Explanation: The design matrix as a DataFrame
End of explanation
"""
interM_lm.model.exog
interM_lm.model.exog_names
infl = interM_lm.get_influence()
resid = infl.resid_studentized_internal
plt.figure(figsize=(6, 6))
for values, group in factor_groups:
i, j = values
idx = group.index
plt.scatter(
X[idx],
resid[idx],
marker=symbols[j],
color=colors[i - 1],
s=144,
edgecolors="black",
)
plt.xlabel("X")
plt.ylabel("standardized resids")
"""
Explanation: The design matrix as an ndarray
End of explanation
"""
drop_idx = abs(resid).argmax()
print(drop_idx) # zero-based index
idx = salary_table.index.drop(drop_idx)
lm32 = ols("S ~ C(E) + X + C(M)", data=salary_table, subset=idx).fit()
print(lm32.summary())
print("\n")
interX_lm32 = ols("S ~ C(E) * X + C(M)", data=salary_table, subset=idx).fit()
print(interX_lm32.summary())
print("\n")
table3 = anova_lm(lm32, interX_lm32)
print(table3)
print("\n")
interM_lm32 = ols("S ~ X + C(E) * C(M)", data=salary_table, subset=idx).fit()
table4 = anova_lm(lm32, interM_lm32)
print(table4)
print("\n")
"""
Explanation: Looks like one observation is an outlier.
End of explanation
"""
resid = interM_lm32.get_influence().summary_frame()["standard_resid"]
plt.figure(figsize=(6, 6))
resid = resid.reindex(X.index)
for values, group in factor_groups:
i, j = values
idx = group.index
plt.scatter(
X.loc[idx],
resid.loc[idx],
marker=symbols[j],
color=colors[i - 1],
s=144,
edgecolors="black",
)
plt.xlabel("X[~[32]]")
plt.ylabel("standardized resids")
"""
Explanation: Replot the residuals
End of explanation
"""
lm_final = ols("S ~ X + C(E)*C(M)", data=salary_table.drop([drop_idx])).fit()
mf = lm_final.model.data.orig_exog
lstyle = ["-", "--"]
plt.figure(figsize=(6, 6))
for values, group in factor_groups:
i, j = values
idx = group.index
plt.scatter(
X[idx],
S[idx],
marker=symbols[j],
color=colors[i - 1],
s=144,
edgecolors="black",
)
# drop NA because there is no idx 32 in the final model
fv = lm_final.fittedvalues.reindex(idx).dropna()
x = mf.X.reindex(idx).dropna()
plt.plot(x, fv, ls=lstyle[j], color=colors[i - 1])
plt.xlabel("Experience")
plt.ylabel("Salary")
"""
Explanation: Plot the fitted values
End of explanation
"""
U = S - X * interX_lm32.params["X"]
plt.figure(figsize=(6, 6))
interaction_plot(
E, M, U, colors=["red", "blue"], markers=["^", "D"], markersize=10, ax=plt.gca()
)
"""
Explanation: From our first look at the data, the difference between Master's and PhD in the management group is different than in the non-management group. This is an interaction between the two qualitative variables management, M and education, E. We can visualize this by first removing the effect of experience, then plotting the means within each of the 6 groups using interaction_plot.
End of explanation
"""
try:
jobtest_table = pd.read_table("jobtest.table")
except: # do not have data already
url = "http://stats191.stanford.edu/data/jobtest.table"
jobtest_table = pd.read_table(url)
factor_group = jobtest_table.groupby(["MINORITY"])
fig, ax = plt.subplots(figsize=(6, 6))
colors = ["purple", "green"]
markers = ["o", "v"]
for factor, group in factor_group:
ax.scatter(
group["TEST"],
group["JPERF"],
color=colors[factor],
marker=markers[factor],
s=12 ** 2,
)
ax.set_xlabel("TEST")
ax.set_ylabel("JPERF")
min_lm = ols("JPERF ~ TEST", data=jobtest_table).fit()
print(min_lm.summary())
fig, ax = plt.subplots(figsize=(6, 6))
for factor, group in factor_group:
ax.scatter(
group["TEST"],
group["JPERF"],
color=colors[factor],
marker=markers[factor],
s=12 ** 2,
)
ax.set_xlabel("TEST")
ax.set_ylabel("JPERF")
fig = abline_plot(model_results=min_lm, ax=ax)
min_lm2 = ols("JPERF ~ TEST + TEST:MINORITY", data=jobtest_table).fit()
print(min_lm2.summary())
fig, ax = plt.subplots(figsize=(6, 6))
for factor, group in factor_group:
ax.scatter(
group["TEST"],
group["JPERF"],
color=colors[factor],
marker=markers[factor],
s=12 ** 2,
)
fig = abline_plot(
intercept=min_lm2.params["Intercept"],
slope=min_lm2.params["TEST"],
ax=ax,
color="purple",
)
fig = abline_plot(
intercept=min_lm2.params["Intercept"],
slope=min_lm2.params["TEST"] + min_lm2.params["TEST:MINORITY"],
ax=ax,
color="green",
)
min_lm3 = ols("JPERF ~ TEST + MINORITY", data=jobtest_table).fit()
print(min_lm3.summary())
fig, ax = plt.subplots(figsize=(6, 6))
for factor, group in factor_group:
ax.scatter(
group["TEST"],
group["JPERF"],
color=colors[factor],
marker=markers[factor],
s=12 ** 2,
)
fig = abline_plot(
intercept=min_lm3.params["Intercept"],
slope=min_lm3.params["TEST"],
ax=ax,
color="purple",
)
fig = abline_plot(
intercept=min_lm3.params["Intercept"] + min_lm3.params["MINORITY"],
slope=min_lm3.params["TEST"],
ax=ax,
color="green",
)
min_lm4 = ols("JPERF ~ TEST * MINORITY", data=jobtest_table).fit()
print(min_lm4.summary())
fig, ax = plt.subplots(figsize=(8, 6))
for factor, group in factor_group:
ax.scatter(
group["TEST"],
group["JPERF"],
color=colors[factor],
marker=markers[factor],
s=12 ** 2,
)
fig = abline_plot(
intercept=min_lm4.params["Intercept"],
slope=min_lm4.params["TEST"],
ax=ax,
color="purple",
)
fig = abline_plot(
intercept=min_lm4.params["Intercept"] + min_lm4.params["MINORITY"],
slope=min_lm4.params["TEST"] + min_lm4.params["TEST:MINORITY"],
ax=ax,
color="green",
)
# is there any effect of MINORITY on slope or intercept?
table5 = anova_lm(min_lm, min_lm4)
print(table5)
# is there any effect of MINORITY on intercept
table6 = anova_lm(min_lm, min_lm3)
print(table6)
# is there any effect of MINORITY on slope
table7 = anova_lm(min_lm, min_lm2)
print(table7)
# is it just the slope or both?
table8 = anova_lm(min_lm2, min_lm4)
print(table8)
"""
Explanation: Minority Employment Data
End of explanation
"""
try:
rehab_table = pd.read_csv("rehab.table")
except:
url = "http://stats191.stanford.edu/data/rehab.csv"
rehab_table = pd.read_table(url, delimiter=",")
rehab_table.to_csv("rehab.table")
fig, ax = plt.subplots(figsize=(8, 6))
fig = rehab_table.boxplot("Time", "Fitness", ax=ax, grid=False)
rehab_lm = ols("Time ~ C(Fitness)", data=rehab_table).fit()
table9 = anova_lm(rehab_lm)
print(table9)
print(rehab_lm.model.data.orig_exog)
print(rehab_lm.summary())
"""
Explanation: One-way ANOVA
End of explanation
"""
try:
kidney_table = pd.read_table("./kidney.table")
except:
url = "http://stats191.stanford.edu/data/kidney.table"
kidney_table = pd.read_csv(url, delim_whitespace=True)
"""
Explanation: Two-way ANOVA
End of explanation
"""
kidney_table.head(10)
"""
Explanation: Explore the dataset
End of explanation
"""
kt = kidney_table
plt.figure(figsize=(8, 6))
fig = interaction_plot(
kt["Weight"],
kt["Duration"],
np.log(kt["Days"] + 1),
colors=["red", "blue"],
markers=["D", "^"],
ms=10,
ax=plt.gca(),
)
"""
Explanation: Balanced panel
End of explanation
"""
kidney_lm = ols("np.log(Days+1) ~ C(Duration) * C(Weight)", data=kt).fit()
table10 = anova_lm(kidney_lm)
print(
anova_lm(ols("np.log(Days+1) ~ C(Duration) + C(Weight)", data=kt).fit(), kidney_lm)
)
print(
anova_lm(
ols("np.log(Days+1) ~ C(Duration)", data=kt).fit(),
ols("np.log(Days+1) ~ C(Duration) + C(Weight, Sum)", data=kt).fit(),
)
)
print(
anova_lm(
ols("np.log(Days+1) ~ C(Weight)", data=kt).fit(),
ols("np.log(Days+1) ~ C(Duration) + C(Weight, Sum)", data=kt).fit(),
)
)
"""
Explanation: Variables available in the calling namespace are also available in the formula evaluation namespace.
End of explanation
"""
sum_lm = ols("np.log(Days+1) ~ C(Duration, Sum) * C(Weight, Sum)", data=kt).fit()
print(anova_lm(sum_lm))
print(anova_lm(sum_lm, typ=2))
print(anova_lm(sum_lm, typ=3))
nosum_lm = ols(
"np.log(Days+1) ~ C(Duration, Treatment) * C(Weight, Treatment)", data=kt
).fit()
print(anova_lm(nosum_lm))
print(anova_lm(nosum_lm, typ=2))
print(anova_lm(nosum_lm, typ=3))
"""
Explanation: Sum of squares
Illustrates the use of the different types of sums of squares (I, II, III)
and how the Sum contrast can be used to produce the same output between
the three.
Types I and II are equivalent under a balanced design.
Do not use Type III with non-orthogonal contrasts - i.e., Treatment.
End of explanation
"""
|
DHBern/Tools-and-Techniques | lessons/07 Using the Maps API.ipynb | gpl-3.0 | import requests
"""
Explanation: Using the Google Maps API
A lot of the Google Maps geographic functionality can be got at programmatically! This can be really useful for getting information about a place, even when you don't want to show it on a map.
Here we will look at the 'Google Places' web API, and along the way we'll learn how to use web service APIs in general. We saw a little bit of this with text collation, so this time we'll look a little more carefully at how the web part works.
The first thing we will need is a Python library for talking to the Web. The best one out there at the moment is the 'requests' library, which doesn't come standard in Python but which you probably already have installed. If this doesn't work, go to a command / terminal window and type
pip install requests
or
pip3 install requests
if you are on a Mac.
End of explanation
"""
search_endpoint = 'https://maps.googleapis.com/maps/api/place/textsearch/json'
search_params = {
'query': 'Länggass Stübli',
'key': 'AIzaSyCNx-klDCfhopV6W_QPFZ0iwv5sp1J0XwQ',
'language': 'en'
}
r = requests.get( search_endpoint, params=search_params)
r.json() # See what we got
"""
Explanation: Place text search
You have some place name, and you want to find out where it is. We do this using two API functions provided by Google, first to get the ID of the place, and then to get more information about it.
Google, like Zotero, requires an API key to use its web API. This is because Google sets limits to how much you can use it for free. The limits should plenty for a normal person's use, though.
For the time being, you can use the API key that I have put in ILIAS. If you are going to do your own work with Maps, though, then you should go to http://developers.google.com/ and sign up as a developer. You'll then need to create a project, and in that project go to 'API keys & auth' -> and set up access for the API you want to use. The one we're using today is the Google Places API Web Service. You'll then need to go to 'Credentials' to make your API key. If you need further help with the options, then talk to me!
Google's documentation for the Places API is here: https://developers.google.com/places/webservice/intro
and today we'll be using the Place Search and Place Details functions.
End of explanation
"""
search_result = r.json()
details_endpoint = 'https://maps.googleapis.com/maps/api/place/details/json'
dparams = {
'key': 'AIzaSyCNx-klDCfhopV6W_QPFZ0iwv5sp1J0XwQ',
'placeid': search_result['results'][0]['place_id'],
'language': 'en'
}
r = requests.get( details_endpoint, params=dparams)
r.json() # See what we got
"""
Explanation: Place details - get more information once we have a place
Once we have successfully looked up a place, we will have an ID for it. This is Google's way of distinguishing between places of the same name, so that we know we have the right one. We can use that ID to get information about a place we have already looked up with the details API function.
End of explanation
"""
places_to_lookup = ['Moskva', 'Venice', 'Rosslyn Chapel', 'Cantabrigia']
places_found = {}
for p in places_to_lookup:
myparams = {
'query': p,
'key': 'AIzaSyCNx-klDCfhopV6W_QPFZ0iwv5sp1J0XwQ',
'language': 'en'
}
myr = requests.get( search_endpoint, params=myparams )
myresult = myr.json()
if 'results' in myresult and len(myresult['results']) > 0:
print("Found information for %s" % p)
places_found[p] = myresult['results'][0]
places_found
"""
Explanation: Now let's look up a series of places! We'll store our results in places_found, for each place that we find.
End of explanation
"""
import csv
f = open('myplaces.csv', 'w', newline='', encoding='utf-8')
writer = csv.writer(f)
# First, write our column headers!
writer.writerow(['Place name', 'Address', 'ID', 'Latitude', 'Longitude'])
"""
Explanation: Exporting our data to CSV
One thing we can do with the Places API is to look up a bunch of places, get their latitude and longitude or their canonical names, and put those into a big spreadsheet for use elsewhere (or even for importing into Google Maps to make a map!)
The easiest way to make something like a spreadsheet in a computer program is to use CSV, which stands for comma separated values. That is what we used earlier to get our UK fat supply data into our map. Python has a built-in module for this, and we use it like this to make a CSV file.
End of explanation
"""
for p in places_found.keys():
place_info = places_found[p]
address = place_info['formatted_address']
placeid = place_info['place_id']
latitude = place_info['geometry']['location']['lat']
longitude = place_info['geometry']['location']['lng']
writer.writerow([p, address, placeid, latitude, longitude])
f.close() # Always close what you open, if you didn't use 'with'!
"""
Explanation: Now we have an open file called 'myplaces.csv', and we have written one row to it. If you were to close the filehandle now and look at the file, you would see that it looks like this:
Place name,Address,ID,Latitude,Longitude
But we won't close the file yet, because we want to write each of our places into its row.
End of explanation
"""
with open('myplaces.csv', encoding='utf-8') as f:
data = f.read()
print(data)
"""
Explanation: Now we can make sure the file is there and has what we expect!
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/inm/cmip6/models/sandbox-1/atmos.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inm', 'sandbox-1', 'atmos')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmos
MIP Era: CMIP6
Institute: INM
Source ID: SANDBOX-1
Topic: Atmos
Sub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos.
Properties: 156 (127 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:05
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Overview
2. Key Properties --> Resolution
3. Key Properties --> Timestepping
4. Key Properties --> Orography
5. Grid --> Discretisation
6. Grid --> Discretisation --> Horizontal
7. Grid --> Discretisation --> Vertical
8. Dynamical Core
9. Dynamical Core --> Top Boundary
10. Dynamical Core --> Lateral Boundary
11. Dynamical Core --> Diffusion Horizontal
12. Dynamical Core --> Advection Tracers
13. Dynamical Core --> Advection Momentum
14. Radiation
15. Radiation --> Shortwave Radiation
16. Radiation --> Shortwave GHG
17. Radiation --> Shortwave Cloud Ice
18. Radiation --> Shortwave Cloud Liquid
19. Radiation --> Shortwave Cloud Inhomogeneity
20. Radiation --> Shortwave Aerosols
21. Radiation --> Shortwave Gases
22. Radiation --> Longwave Radiation
23. Radiation --> Longwave GHG
24. Radiation --> Longwave Cloud Ice
25. Radiation --> Longwave Cloud Liquid
26. Radiation --> Longwave Cloud Inhomogeneity
27. Radiation --> Longwave Aerosols
28. Radiation --> Longwave Gases
29. Turbulence Convection
30. Turbulence Convection --> Boundary Layer Turbulence
31. Turbulence Convection --> Deep Convection
32. Turbulence Convection --> Shallow Convection
33. Microphysics Precipitation
34. Microphysics Precipitation --> Large Scale Precipitation
35. Microphysics Precipitation --> Large Scale Cloud Microphysics
36. Cloud Scheme
37. Cloud Scheme --> Optical Cloud Properties
38. Cloud Scheme --> Sub Grid Scale Water Distribution
39. Cloud Scheme --> Sub Grid Scale Ice Distribution
40. Observation Simulation
41. Observation Simulation --> Isscp Attributes
42. Observation Simulation --> Cosp Attributes
43. Observation Simulation --> Radar Inputs
44. Observation Simulation --> Lidar Inputs
45. Gravity Waves
46. Gravity Waves --> Orographic Gravity Waves
47. Gravity Waves --> Non Orographic Gravity Waves
48. Solar
49. Solar --> Solar Pathways
50. Solar --> Solar Constant
51. Solar --> Orbital Parameters
52. Solar --> Insolation Ozone
53. Volcanos
54. Volcanos --> Volcanoes Treatment
1. Key Properties --> Overview
Top level key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
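# Illustrative example only (hypothetical choice): a single-valued ENUM takes one of the choices listed above, e.g.
#     DOC.set_value("AGCM")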
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of atmospheric model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
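# Illustrative example only (hypothetical choices): for a multi-valued (1.N) property the call
# shown above would presumably be repeated once per selected choice, e.g.
#     DOC.set_value("primitive equations")
#     DOC.set_value("hydrostatic")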
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Resolution
Characteristics of the model resolution
2.1. Horizontal Resolution Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, e.g. 1 deg (Equator) - 0.5 deg
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
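# Illustrative example only (hypothetical number): integer values are passed unquoted, e.g.
#     DOC.set_value(73)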
"""
Explanation: 2.4. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on the computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
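# Illustrative example only: boolean values are passed unquoted, e.g.
#     DOC.set_value(True)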
"""
Explanation: 2.5. High Top
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
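# Illustrative example only (hypothetical timestep), following the "e.g. 30 min" format
# suggested in the property description:
#     DOC.set_value("30 min")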
"""
Explanation: 3. Key Properties --> Timestepping
Characteristics of the atmosphere model time stepping
3.1. Timestep Dynamics
Is Required: TRUE Type: STRING Cardinality: 1.1
Timestep for the dynamics, e.g. 30 min.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep Shortwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the shortwave radiative transfer, e.g. 1.5 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestep Longwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the longwave radiative transfer, e.g. 3 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Orography
Characteristics of the model orography
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the orography.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
"""
Explanation: 4.2. Changes
Is Required: TRUE Type: ENUM Cardinality: 1.N
If the orography type is modified, describe the time adaptation changes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid --> Discretisation
Atmosphere grid discretisation
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of grid discretisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Discretisation --> Horizontal
Atmosphere discretisation in the horizontal
6.1. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
"""
Explanation: 6.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.3. Scheme Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. Horizontal Pole
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal discretisation pole singularity treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.5. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Discretisation --> Vertical
Atmosphere discretisation in the vertical
7.1. Coordinate Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type of vertical coordinate system
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Dynamical Core
Characteristics of the dynamical core
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere dynamical core
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the dynamical core of the model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Timestepping Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Timestepping framework type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
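# Illustrative example only (hypothetical selection) of a multi-valued entry:
#     DOC.set_value("wind components")
#     DOC.set_value("temperature")
#     DOC.set_value("water vapour")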
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of the model prognostic variables
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Dynamical Core --> Top Boundary
Type of boundary layer at the top of the model
9.1. Top Boundary Condition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Top boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Top Heat
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary heat treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Top Wind
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary wind treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Dynamical Core --> Lateral Boundary
Type of lateral boundary condition (if the model is a regional model)
10.1. Condition
Is Required: FALSE Type: ENUM Cardinality: 0.1
Type of lateral boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Dynamical Core --> Diffusion Horizontal
Horizontal diffusion scheme
11.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Horizontal diffusion scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal diffusion scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Dynamical Core --> Advection Tracers
Tracer advection scheme
12.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Tracer advection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.3. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.4. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracer advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamical Core --> Advection Momentum
Momentum advection scheme
13.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Momentum advection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Scheme Staggering Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiation
Characteristics of the atmosphere radiation process
14.1. Aerosols
Is Required: TRUE Type: ENUM Cardinality: 1.N
Aerosols whose radiative effect is taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Radiation --> Shortwave Radiation
Properties of the shortwave radiation scheme
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of shortwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shortwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shortwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Shortwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
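# Illustrative example only (hypothetical selection):
#     DOC.set_value("CO2")
#     DOC.set_value("CH4")
#     DOC.set_value("O3")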
"""
Explanation: 16. Radiation --> Shortwave GHG
Representation of greenhouse gases in the shortwave radiation scheme
16.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiation --> Shortwave Cloud Ice
Shortwave radiative properties of ice crystals in clouds
17.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiation --> Shortwave Cloud Liquid
Shortwave radiative properties of liquid droplets in clouds
18.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiation --> Shortwave Cloud Inhomogeneity
Cloud inhomogeneity in the shortwave radiation scheme
19.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiation --> Shortwave Aerosols
Shortwave radiative properties of aerosols
20.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiation --> Shortwave Gases
Shortwave radiative properties of gases
21.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Radiation --> Longwave Radiation
Properties of the longwave radiation scheme
22.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of longwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the longwave radiation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Longwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Longwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 22.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Longwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiation --> Longwave GHG
Representation of greenhouse gases in the longwave radiation scheme
23.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiation --> Longwave Cloud Ice
Longwave radiative properties of ice crystals in clouds
24.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiation --> Longwave Cloud Liquid
Longwave radiative properties of liquid droplets in clouds
25.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiation --> Longwave Cloud Inhomogeneity
Cloud inhomogeneity in the longwave radiation scheme
26.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiation --> Longwave Aerosols
Longwave radiative properties of aerosols
27.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiation --> Longwave Gases
Longwave radiative properties of gases
28.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Turbulence Convection
Atmosphere Convective Turbulence and Clouds
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere convection and turbulence
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Turbulence Convection --> Boundary Layer Turbulence
Properties of the boundary layer turbulence scheme
30.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Boundary layer turbulence scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Boundary layer turbulence scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Closure Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Boundary layer turbulence scheme closure order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Counter Gradient
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Uses boundary layer turbulence scheme counter gradient
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31. Turbulence Convection --> Deep Convection
Properties of the deep convection scheme
31.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Deep convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of deep convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
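# Optional property (cardinality 0.N): it may be left unset, or set to one or more choices.
# Illustrative example only (hypothetical):
#     DOC.set_value("single moment")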
"""
Explanation: 31.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeors and water vapour from updrafts
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Turbulence Convection --> Shallow Convection
Properties of the shallow convection scheme
32.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Shallow convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shallow convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
"""
Explanation: 32.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shallow convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33. Microphysics Precipitation
Large Scale Cloud Microphysics and Precipitation
33.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of large scale cloud microphysics and precipitation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Microphysics Precipitation --> Large Scale Precipitation
Properties of the large scale precipitation scheme
34.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the large scale precipitation parameterisation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34.2. Hydrometeors
Is Required: TRUE Type: ENUM Cardinality: 1.N
Precipitating hydrometeors taken into account in the large scale precipitation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Properties of the large scale cloud microphysics scheme
35.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the microphysics parameterisation scheme used for large scale clouds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Large scale cloud microphysics processes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Cloud Scheme
Characteristics of the cloud scheme
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the atmosphere cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
"""
Explanation: 36.3. Atmos Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Atmosphere components that are linked to the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.4. Uses Separate Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Different cloud schemes for the different types of clouds (convective, stratiform and boundary layer)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.6. Prognostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a prognostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.7. Diagnostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a diagnostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.8. Prognostic Variables
Is Required: FALSE Type: ENUM Cardinality: 0.N
List the prognostic variables used by the cloud scheme, if applicable.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 37. Cloud Scheme --> Optical Cloud Properties
Optical cloud properties
37.1. Cloud Overlap Method
Is Required: FALSE Type: ENUM Cardinality: 0.1
Method for taking into account overlapping of cloud layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Cloud Inhomogeneity
Is Required: FALSE Type: STRING Cardinality: 0.1
Method for taking into account cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Sub-grid scale water distribution
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale water distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 38.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale water distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 38.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale water distribution function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 38.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale water distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Sub-grid scale ice distribution
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale ice distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 39.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale ice distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 39.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale ice distribution function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 39.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale ice distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40. Observation Simulation
Characteristics of observation simulation
40.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of observation simulator characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Observation Simulation --> Isscp Attributes
ISSCP Characteristics
41.1. Top Height Estimation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator ISSCP top height estimation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. Top Height Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator ISSCP top height direction
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 42. Observation Simulation --> Cosp Attributes
CFMIP Observational Simulator Package attributes
42.1. Run Configuration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator COSP run configuration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.2. Number Of Grid Points
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of grid points
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.3. Number Of Sub Columns
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of sub-columns used to simulate sub-grid variability
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.4. Number Of Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of levels
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 43. Observation Simulation --> Radar Inputs
Characteristics of the cloud radar simulator
43.1. Frequency
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Cloud simulator radar frequency (Hz)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 43.2. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator radar type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.3. Gas Absorption
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses gas absorption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.4. Effective Radius
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses effective radius
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44. Observation Simulation --> Lidar Inputs
Characteristics of the cloud lidar simulator
44.1. Ice Types
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator lidar ice type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44.2. Overlap
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator lidar overlap
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 45. Gravity Waves
Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.
45.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of gravity wave parameterisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.2. Sponge Layer
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sponge layer in the upper levels in order to avoid gravity wave reflection at the top.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.3. Background
Is Required: TRUE Type: ENUM Cardinality: 1.1
Background wave distribution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.4. Subgrid Scale Orography
Is Required: TRUE Type: ENUM Cardinality: 1.N
Subgrid scale orography effects taken into account.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 46. Gravity Waves --> Orographic Gravity Waves
Gravity waves generated due to the presence of orography
46.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 47. Gravity Waves --> Non Orographic Gravity Waves
Gravity waves generated by non-orographic processes.
47.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the non-orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
"""
Explanation: 47.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 48. Solar
Top of atmosphere solar insolation characteristics
48.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of solar insolation of the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 49. Solar --> Solar Pathways
Pathways for solar forcing of the atmosphere
49.1. Pathways
Is Required: TRUE Type: ENUM Cardinality: 1.N
Pathways for the solar forcing of the atmosphere model domain
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 50. Solar --> Solar Constant
Solar constant and top of atmosphere insolation characteristics
50.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the solar constant.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 50.2. Fixed Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If the solar constant is fixed, enter the value of the solar constant (W m-2).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 50.3. Transient Characteristics
Is Required: TRUE Type: STRING Cardinality: 1.1
Solar constant transient characteristics (W m-2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 51. Solar --> Orbital Parameters
Orbital parameters and top of atmosphere insolation characteristics
51.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 51.2. Fixed Reference Date
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Reference date for fixed orbital parameters (yyyy)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 51.3. Transient Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Description of transient orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 51.4. Computation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used for computing orbital parameters.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 52. Solar --> Insolation Ozone
Impact of solar insolation on stratospheric ozone
52.1. Solar Ozone Impact
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does top of atmosphere insolation impact on stratospheric ozone?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 53. Volcanos
Characteristics of the implementation of volcanoes
53.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the implementation of volcanic effects in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 54. Volcanos --> Volcanoes Treatment
Treatment of volcanoes in the atmosphere
54.1. Volcanoes Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How volcanic effects are modeled in the atmosphere.
End of explanation
"""
|
fangohr/plot_vtk_matplotlib | tutorial/plot_vtk_matplotlib_tutorial.ipynb | bsd-2-clause | %matplotlib inline
# To generate the vector fields
import dolfin as df
import mshr
import numpy as np
import plot_vtk_matplotlib as pvm
# Matplotlib parameters can be tuned with rc.Params
# This library has modified values. For example:
# matplotlib.rcParams['font.size'] = 22
"""
Explanation: Plot VTK files using Matplotlib
This library is useful to plot vector fields stored in VTK files (e.g. vtu or vtk) using Matplotlib
Libraries
End of explanation
"""
mesh = mshr.Rectangle(df.Point(-3, -3), df.Point(3, 3))
mesh = mshr.generate_mesh(mesh, 10)
vspace = df.VectorFunctionSpace(mesh, 'Lagrange', degree=1, dim=2)
vfunction = df.Expression(("sin(x[1])", "cos(x[0])"))
vfunction = df.interpolate(vfunction, vspace)
# df.plot(vfunction, interactive=True)
"""
Explanation: Generate 2D Vector Field using Dolfin
We will generate a two dimensional vector field using Fenics (Dolfin) on a square mesh spanning [-3, 3] x [-3, 3]
End of explanation
"""
_file = df.File('dolfin_vector_field_2d.pvd')
_file << vfunction
"""
Explanation: Now we can save the data in a VTK file. By default, Fenics saves XML files (instead of binary) using an unstructured grid, with the .vtu extension
End of explanation
"""
# Load the 2D vector field saved above
vf_plot = pvm.plot_vtk_matplotlib('./dolfin_vector_field_2d000000.vtu',
z_max=1, z_min=-1,
)
# Extract the data from the file
vf_plot.extract_data(# vtkfiletype=XMLStructuredGrid
)
"""
Explanation: Consequently, we initialise the plotting library passing the path to the VTK file. When loading the data, we must specify the range of spatial z values from where a slice of data is going to be plotted.
After starting the plot_vtk_matplotlib class with the VTK file path, it is necessary to extract the data. The vtkXMLUnstructuredGrid is the default format option to load the data from the .vtu file. We can change this option using the vtkfiletype argument (see the documentation for more details).
End of explanation
"""
# Plot the y component and arrows in black
vf_plot.plot_vtk(-3, 3,
-3, 3,
# nx=50, ny=50,
v_component='vy',
hsv_map='2d',
# figsize=(10, 10),
# savefig='hsv_map.pdf',
colorbar=True,
quiver_type='raw_colour',
)
"""
Explanation: We now start experimenting with the options. One of the most interesting functions is plot_vtk, which shows an interpolated colormap using the data in the slice specified when extracting the information.
One of the nicest features of this function is the option to use a HSV mapping to show the orientation of the in-plane components of the vectors, which can be mapped in the range 0 to 2 $\pi$.
By default, the quiver plot on top is shown with the raw data and an inverted colormap (if not HSV), or as black arrows if using the HSV option. The arrows' color can be modified by playing with the quiver_type options, which also specify whether the vector field arrows are interpolated on a regular grid using the data from the slice. The HSV mapping uses SciPy.
In this case we use the '2d' option for hsv_map since we have a 2D vector field. The VTK file in this case has the z components set to zero, so using '3d' would make the background look black.
The arrows in the quiver plot are drawn at the center of the grid nodes (in Matplotlib they are drawn from the tail by default). This option can be changed using the pivot option.
The colormap is interpolated in an nx by ny grid.
End of explanation
"""
# Plot the y component and arrows in black
vf_plot.plot_vtk(-3, 3,
-3, 3,
nx=50, ny=50,
v_component='vx',
# cmap='RdBu',
interpolator='natgrid',
interpolator_method='linear',
# figsize=(10, 10),
colorbar=True,
# quiver_type='raw_colour',
)
"""
Explanation: The default option is to use a colormap for the interpolated data. We can also specify natgrid instead of scipy for interpolating the data. Its linear method is better than its default 'nn' (natural neighbours) method, but it is still a poor approximation compared with SciPy.
End of explanation
"""
# Quiver plot
vf_plot.plot_quiver(quiver_type='interpolated_cmap',
x_min=-3, x_max=3,
y_min=-3, y_max=3,
linewidth=0.8,
scale=1 / 0.06,
width=0.007,
alpha=0.8,
v_component='vx',
# pivot='tail'
# savefig='vector_field.png'
)
"""
Explanation: We can also use a quiver plot, which can also be interpolated if necessary. Extra arguments can be passed to control the arrow definitions. The default interpolation is made with SciPy and a linear method. These options can be changed with interpolator and interpolator_method respectively.
End of explanation
"""
mesh = mshr.Sphere(df.Point(0, 0), 5)
mesh = mshr.generate_mesh(mesh, 10)
vspace = df.VectorFunctionSpace(mesh, 'Lagrange', degree=1, dim=3)
vfunction = df.Expression(("sin(x[1])", "cos(x[0])", "sin(x[2])"))
vfunction = df.interpolate(vfunction, vspace)
# We can plot the field using Dolfin
# df.plot(vfunction, interactive=True)
# We can plot the mesh
# df.plot(mesh, interactive=True)
"""
Explanation: 3D Vector Field
We will now generate a 3D vector field inside a sphere to test how the slicing works:
End of explanation
"""
_file = df.File('dolfin_sphere_vector_field_3d.pvd')
_file << vfunction
"""
Explanation: We save the data as before:
End of explanation
"""
# Load the 3D vector field saved above
vf_plot = pvm.plot_vtk_matplotlib('./dolfin_sphere_vector_field_3d000000.vtu',
z_max=0.8, z_min=-0.8)
# Extract the data from the file
vf_plot.extract_data()
"""
Explanation: Now we load the saved field and specify a slice through the middle of the sphere
End of explanation
"""
# plot the x component and arrows in black
vf_plot.plot_vtk(-5, 5,
-5, 5,
nx=100, ny=100,
v_component='vz',
hsv_map='3d',
# figsize=(10, 8),
colorbar=True,
quiver_type='raw_colour',
)
"""
Explanation: If we plot the slice with a 3D HSV mapping, darker regions indicate the lower $z$ magnitudes of the vector field, while the brighter regions indicate the highest $z$ values.
End of explanation
"""
# Quiver plot
vf_plot.plot_quiver(quiver_type='interpolated_cmap',
x_min=-5, x_max=5,
y_min=-5, y_max=5,
linewidth=0.8,
scale=1 / 0.06,
width=0.007,
alpha=0.8,
v_component='vz'
)
"""
Explanation: We can interpolate this slice and get a quiver plot from the vector field
End of explanation
"""
vf_plot.z_min, vf_plot.z_max = 4, 5
# Quiver plot
vf_plot.plot_vtk(-5, 5,
-5, 5,
nx=100, ny=100,
v_component='vy',
hsv_map='2d',
colorbar=True,
quiver_type=None,
)
"""
Explanation: We can also redefine the range of $z$ values from which the data is extracted, but this is not very accurate for a finite element mesh, since many points do not lie exactly in the slice plane, so the approximation can be poor. The quiver plot on top can be disabled by setting quiver_type to None or False.
End of explanation
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(18, 6))
comps = ['vx', 'vy', 'vz']
for i in range(3):
ax = fig.add_subplot(1, 3, i + 1)
vf_plot.plot_quiver(quiver_type='interpolated_cmap',
x_min=-5, x_max=5,
y_min=-5, y_max=5,
linewidth=0.8,
scale=1 / 0.06,
width=0.007,
alpha=0.8,
cmap='RdYlBu',
v_component=comps[i],
predefined_axis=ax,
)
ax.set_title(comps[i])
# ax.axis('off')
"""
Explanation: Extending with Matplotlib
We can use predefined axes to generate, for instance, grids of plots. We will use the vf_plot object from the previous section
End of explanation
"""
|
iutzeler/Introduction-to-Python-for-Data-Sciences | 3-3_Fancy_Visualization_with_Seaborn.ipynb | mit | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# Create some data
rng = np.random.RandomState(0)
x = np.linspace(0, 10, 500)
y = np.cumsum(rng.randn(500, 3), 0)
plt.plot(x, y)
plt.legend('one two three'.split(' '));
"""
Explanation: <table>
<tr>
<td width=15%><img src="./img/UGA.png"></img></td>
<td><center><h1>Introduction to Python for Data Sciences</h1></center></td>
<td width=15%><a href="http://www.iutzeler.org" style="font-size: 16px; font-weight: bold">Franck Iutzeler</a> </td>
</tr>
</table>
<br/><br/>
<center><a style="font-size: 40pt; font-weight: bold">Chap. 3 - Data Handling with Pandas </a></center>
<br/><br/>
3- Fancy Visualization with Seaborn
Advanced visualization
Seaborn is a package that produces somewhat nicer and more data-oriented plots than Matplotlib. It also gives a fresher look to matplotlib plots.
End of explanation
"""
import seaborn as sns
sns.set()
# Same command but now seaborn is set
plt.plot(x, y)
plt.legend('one two three'.split(' '));
"""
Explanation: Let us import seaborn and change the matplotlib style with <tt>sns.set()</tt>
End of explanation
"""
data = np.random.multivariate_normal([0, 1.5], [[1, 0.2], [0.2, 2]], size=2000)
data = pd.DataFrame(data, columns=['x', 'y'])
for col in 'xy':
plt.hist(data[col], alpha=0.5) # alpha=0.5 provides semi-transparent plots
"""
Explanation: Plotting Distributions
Apart from the standard histograms <tt>plt.hist</tt>, Seaborn provides smoothed density plots based on data using <tt>sns.kdeplot</tt> or <tt>sns.displot</tt>.
End of explanation
"""
sns.kdeplot(data['x'])
sns.kdeplot(data['y'],shade=True)
"""
Explanation: <tt>kdeplot</tt> provides density plots from an array or series (<tt>shade=True</tt> provides filled ones).
End of explanation
"""
sns.displot(data['x'])
sns.histplot(data['y'])
"""
Explanation: <tt>displot</tt> is a mix of the two previous ones.
End of explanation
"""
sns.kdeplot(data['x'],y = data['y'], shade=True, thresh=0.05, cmap="Reds", cbar=True)
"""
Explanation: A two-dimensional dataset may be represented by level sets with <tt>kdeplot</tt>.
End of explanation
"""
sns.jointplot(x= "x", y= "y", data = data, kind='kde');
"""
Explanation: The joint distribution and the marginal distributions can be displayed together using <tt>jointplot</tt>
End of explanation
"""
import pandas as pd
import numpy as np
iris = pd.read_csv('data/iris.csv')
print(iris.shape)
iris.head()
sns.pairplot(iris, hue='species')
"""
Explanation: Exploring feature correlations and their relevance to classification
Seaborn provides an efficient tool for quickly exploring pairwise feature relationships and how well they separate the classes with <tt>pairplot</tt>.
End of explanation
"""
sns.catplot( x = "species" , y="sepal_length" , data=iris , kind="box")
"""
Explanation: <tt>catplot</tt> (formerly <tt>factorplot</tt>) also provides per-class summary plots such as box plots.
End of explanation
"""
irisS = pd.melt(iris,id_vars="species",value_vars=["sepal_length","sepal_width","petal_length","petal_width"])
irisS.head()
sns.catplot( x= "species" , y = "value" , col="variable" , data=irisS , kind="box")
"""
Explanation: Melting dataframes
For displaying classification data, it is sometimes useful to melt dataframes, that is, to separate:
* id: typically the classes, i.e. things that are not numeric and have to be kept in place (in our case with iris, the species)
* values: the columns holding the numeric values (in our case with iris, sepal_length, sepal_width, etc.)
The command <tt>pd.melt</tt> returns a dataframe whose columns are the id, the variable (former column) name, and the associated value.
End of explanation
"""
|
katychuang/ipython-notebooks | facebook_posting_activity_part2.ipynb | gpl-2.0 | from _keys.facebook import data_file
import json
with open(data_file) as json_data:
data = json.load(json_data)
print (len(data), "posts in this file")
"""
Explanation: Connecting to Facebook API (part 2)
Written by Kat Chuang @katychuang
Objective
The goal of this exercise is to connect to the Facebook Graph API to collect information about my most recent posts, and also to collect each post's subsequent comments and likes. This is a continuation of part 1.
I collected data from my most recent posts and previously saved the JSON output into a file.
In this part we plot the data as a chart for visual representation (a matplotlib heatmap type of visualization).
I saved the filename in a config file, facebook.py, inside a separate local folder, _keys. Inside, you want to save a string variable like the following:
data_file="XXXXXX"
End of explanation
"""
from datetime import datetime
from dateutil.parser import parse
# limit to just march posts
march_posts = list(filter(lambda x: parse(x['created_time'][:-5]) >= datetime(2017, 3, 1), data ))
print(len(march_posts), "posts since", datetime(2017, 3, 1).date())
# get days to count occurrence
march_days = list (map( lambda x: parse(x['created_time']).strftime("%A"), march_posts ))
# count number of posts by day of week
for day in march_days: print(day, " \t", march_days.count(day))
"""
Explanation: From the above code, we read the text file and saved the JSON data to the variable data to work with.
We need to pull the timestamps and their accompanying day of week. We want to convert the list of raw timestamps into a list of formatted timestamps. But first, we import datetime and dateutil's parse function.
End of explanation
"""
def scrub(raw_timestamp):
    timestamp = parse(raw_timestamp)
return dow(timestamp), hod(timestamp)
# returns day of week
def dow(date): return date.strftime("%A") # i.e. Monday
# returns hour of day
def hod(time): return time.strftime("%-I:%M%p") # i.e. 9:07PM
"""
Explanation: Now let's write some utility functions to process the data for the chart.
Scrub turns the raw string type into a datetime type. Then we can pass that into dow() and hod() to format the strings.
End of explanation
"""
yIndex = ["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"]
# Get a list of week numbers, 0-3. Note that March starts on week 9 of 2017
# but we subtract 9 to start at index 0
get_week = list (map( lambda x: parse(x['created_time']).isocalendar()[1]-9, march_posts ))
# Get a list of day numbers, 0-6
get_day = list (map( lambda x: yIndex.index(parse(x['created_time']).strftime("%A")), march_posts ))
# create empty array
from itertools import repeat
month = [[0] * 7 for i in repeat(None, 5)]
print(month)
# go thru posts to fill in empty array
for i, (w, d) in enumerate(zip(get_week, get_day)):
month[w][d] = 1
print("active days: \n", month)
"""
Explanation: Now we want to try to create nested lists. A month contains weeks, which in turn contain days. To express this in code, it would look something like:
M = [W, W, W, ...]
W = ["Mon", "Tues", "Wed", "Thu", ... ]
The lists are combined into a list of lists.
End of explanation
"""
# empty list of lists
activity = [[0] * 7 for i in repeat(None, 5)]
# the total number of posts
limit = len(get_week)
# fill in empty array with a fraction
for i, (w, d) in enumerate(zip(get_week, get_day)):
activity[w][d] += 1/limit
print("activity per day: \n", activity)
"""
Explanation: Now let's step it up and count posts per day, so we can have more than 2 shades of colors on the heatmap.
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
"""
Explanation: Now our data is ready for plotting. Let's import the plotting libraries and set up inline display.
End of explanation
"""
fig, ax = plt.subplots()
heatmap = ax.pcolor(activity, cmap=plt.cm.Greens, alpha=0.8)
# put the major ticks at the middle of each cell
ax.set_xticks(np.arange(0,7)+0.5, minor=False)
ax.set_yticks(np.arange(0,5)+0.5, minor=False)
# want a more natural, table-like display
ax.invert_yaxis()
ax.xaxis.tick_top()
# labels
column_labels = ["Mon", "Tues", "Wed", "Thurs", "Fri", "Sat", "Sun"]
ax.set_xticklabels(column_labels, minor=False)
ax.set_yticklabels(list(''), minor=False)
plt.show()
"""
Explanation: Here's how you create a heatmap-type chart, filled in with the values from activity.
End of explanation
"""
import seaborn as sns
sns.set(font_scale=1.2)
sns.set_style({"savefig.dpi": 100})
ax = sns.heatmap(activity, cmap=plt.cm.Greens, linewidths=.1)
ax.xaxis.tick_top()
ax.set_xticklabels(column_labels, minor=False)
ax.set_yticklabels(list(''), minor=False)
fig = ax.get_figure()
"""
Explanation: I don't like how the borders look, so I'm going to create another version with the seaborn library.
End of explanation
"""
|
jrg365/gpytorch | examples/03_Multitask_Exact_GPs/ModelList_GP_Regression.ipynb | mit | import math
import torch
import gpytorch
from matplotlib import pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
"""
Explanation: ModelList (Multi-Output) GP Regression
Introduction
This notebook demonstrates how to wrap independent GP models into a convenient Multi-Output GP model using a ModelList.
Unlike in the Multitask case, this does not model correlations between outcomes, but treats outcomes independently. This is equivalent to setting up a separate GP for each outcome, but can be much more convenient to handle; in particular it does not require manually looping over models when fitting or predicting.
This type of model is useful when
- the number of training / test points differs across the outcomes
- different covariance modules and / or likelihoods are used for each outcome
For block designs (i.e. when the above points do not apply), you should instead use a batch mode GP as described in the batch independent multioutput example. This will be much faster because it uses additional parallelism.
End of explanation
"""
train_x1 = torch.linspace(0, 0.95, 50) + 0.05 * torch.rand(50)
train_x2 = torch.linspace(0, 0.95, 25) + 0.05 * torch.rand(25)
train_y1 = torch.sin(train_x1 * (2 * math.pi)) + 0.2 * torch.randn_like(train_x1)
train_y2 = torch.cos(train_x2 * (2 * math.pi)) + 0.2 * torch.randn_like(train_x2)
"""
Explanation: Set up training data
In the next cell, we set up the training data for this example. We'll be using a different number of training examples for the different GPs.
End of explanation
"""
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super().__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
likelihood1 = gpytorch.likelihoods.GaussianLikelihood()
model1 = ExactGPModel(train_x1, train_y1, likelihood1)
likelihood2 = gpytorch.likelihoods.GaussianLikelihood()
model2 = ExactGPModel(train_x2, train_y2, likelihood2)
"""
Explanation: Set up the sub-models
Each individual model uses the ExactGP model from the simple regression example.
End of explanation
"""
model = gpytorch.models.IndependentModelList(model1, model2)
likelihood = gpytorch.likelihoods.LikelihoodList(model1.likelihood, model2.likelihood)
"""
Explanation: We now collect the submodels in an IndependentModelList, and the respective likelihoods in a LikelihoodList. These are container modules that make it easy to work with multiple outputs. In particular, they will take in and return lists of inputs / outputs and delegate the data to / from the appropriate sub-model (it is important that the order of the inputs / outputs corresponds to the order of models with which the containers were instantiated).
End of explanation
"""
from gpytorch.mlls import SumMarginalLogLikelihood
mll = SumMarginalLogLikelihood(likelihood, model)
"""
Explanation: Set up overall Marginal Log Likelihood
Assuming independence, the MLL for the container model is simply the sum of the MLLs for the individual models. SumMarginalLogLikelihood is a convenient container for this (by default it uses an ExactMarginalLogLikelihood for each submodel)
End of explanation
"""
# this is for running the notebook in our testing framework
import os
smoke_test = ('CI' in os.environ)
training_iterations = 2 if smoke_test else 50
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the Adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1) # Includes GaussianLikelihood parameters
for i in range(training_iterations):
optimizer.zero_grad()
output = model(*model.train_inputs)
loss = -mll(output, model.train_targets)
loss.backward()
print('Iter %d/%d - Loss: %.3f' % (i + 1, training_iterations, loss.item()))
optimizer.step()
"""
Explanation: Train the model hyperparameters
With the containers in place, the models can be trained in a single loop on the container (note that this means that optimization is performed jointly, which can be an issue if the individual submodels require training via very different step sizes).
End of explanation
"""
# Set into eval mode
model.eval()
likelihood.eval()
# Initialize plots
f, axs = plt.subplots(1, 2, figsize=(8, 3))
# Make predictions (use the same test points)
with torch.no_grad(), gpytorch.settings.fast_pred_var():
test_x = torch.linspace(0, 1, 51)
# This contains predictions for both outcomes as a list
predictions = likelihood(*model(test_x, test_x))
for submodel, prediction, ax in zip(model.models, predictions, axs):
mean = prediction.mean
lower, upper = prediction.confidence_region()
tr_x = submodel.train_inputs[0].detach().numpy()
tr_y = submodel.train_targets.detach().numpy()
# Plot training data as black stars
ax.plot(tr_x, tr_y, 'k*')
# Predictive mean as blue line
ax.plot(test_x.numpy(), mean.numpy(), 'b')
# Shade in confidence
ax.fill_between(test_x.numpy(), lower.detach().numpy(), upper.detach().numpy(), alpha=0.5)
ax.set_ylim([-3, 3])
ax.legend(['Observed Data', 'Mean', 'Confidence'])
ax.set_title('Observed Values (Likelihood)')
None
"""
Explanation: Make predictions with the model
End of explanation
"""
|
dismalpy/dismalpy | doc/notebooks/sarimax_stata.ipynb | bsd-2-clause | %matplotlib inline
import numpy as np
import pandas as pd
from dismalpy import ssm
import statsmodels.api as sm
import matplotlib.pyplot as plt
from datetime import datetime
"""
Explanation: SARIMAX: Introduction
This notebook replicates examples from the Stata ARIMA time series estimation and postestimation documentation.
First, we replicate the four estimation examples http://www.stata.com/manuals13/tsarima.pdf:
ARIMA(1,1,1) model on the U.S. Wholesale Price Index (WPI) dataset.
Variation of example 1 which adds an MA(4) term to the ARIMA(1,1,1) specification to allow for an additive seasonal effect.
ARIMA(2,1,0) x (1,1,0,12) model of monthly airline data. This example allows a multiplicative seasonal effect.
ARMA(1,1) model with exogenous regressors; describes consumption as an autoregressive process on which also the money supply is assumed to be an explanatory variable.
Second, we demonstrate postestimation capabilities to replicate http://www.stata.com/manuals13/tsarimapostestimation.pdf. The model from example 4 is used to demonstrate:
One-step-ahead in-sample prediction
n-step-ahead out-of-sample forecasting
n-step-ahead in-sample dynamic prediction
End of explanation
"""
# Dataset
data = pd.read_stata('data/wpi1.dta')
data.index = data.t
# Fit the model
mod = ssm.SARIMAX(data['wpi'], trend='c', order=(1,1,1))
res = mod.fit()
print(res.summary())
"""
Explanation: ARIMA Example 1: Arima
As can be seen in the graphs from Example 2, the Wholesale price index (WPI) is growing over time (i.e. is not stationary). Therefore an ARMA model is not a good specification. In this first example, we consider a model where the original time series is assumed to be integrated of order 1, so that the difference is assumed to be stationary, and fit a model with one autoregressive lag and one moving average lag, as well as an intercept term.
The postulated data process is then:
$$
\Delta y_t = c + \phi_1 \Delta y_{t-1} + \theta_1 \epsilon_{t-1} + \epsilon_{t}
$$
where $c$ is the intercept of the ARMA model, $\Delta$ is the first-difference operator, and we assume $\epsilon_{t} \sim N(0, \sigma^2)$. This can be rewritten to emphasize lag polynomials as (this will be useful in example 2, below):
$$
(1 - \phi_1 L ) \Delta y_t = c + (1 + \theta_1 L) \epsilon_{t}
$$
where $L$ is the lag operator.
Notice that one difference between the Stata output and the output below is that Stata estimates the following model:
$$
(\Delta y_t - \beta_0) = \phi_1 ( \Delta y_{t-1} - \beta_0) + \theta_1 \epsilon_{t-1} + \epsilon_{t}
$$
where $\beta_0$ is the mean of the process $y_t$. This model is equivalent to the one estimated in the SARIMAX class, but the interpretation is different. To see the equivalence, note that:
$$
(\Delta y_t - \beta_0) = \phi_1 ( \Delta y_{t-1} - \beta_0) + \theta_1 \epsilon_{t-1} + \epsilon_{t} \
\Delta y_t = (1 - \phi_1) \beta_0 + \phi_1 \Delta y_{t-1} + \theta_1 \epsilon_{t-1} + \epsilon_{t}
$$
so that $c = (1 - \phi_1) \beta_0$.
End of explanation
"""
# Dataset
data = pd.read_stata('data/wpi1.dta')
data.index = data.t
data['ln_wpi'] = np.log(data['wpi'])
data['D.ln_wpi'] = data['ln_wpi'].diff()
# Graph data
fig, axes = plt.subplots(1, 2, figsize=(15,4))
# Levels
axes[0].plot(data.index._mpl_repr(), data['wpi'], '-')
axes[0].set(title='US Wholesale Price Index')
# Log difference
axes[1].plot(data.index._mpl_repr(), data['D.ln_wpi'], '-')
axes[1].hlines(0, data.index[0], data.index[-1], 'r')
axes[1].set(title='US Wholesale Price Index - difference of logs');
# Graph data
fig, axes = plt.subplots(1, 2, figsize=(15,4))
fig = sm.graphics.tsa.plot_acf(data.ix[1:, 'D.ln_wpi'], lags=40, ax=axes[0])
fig = sm.graphics.tsa.plot_pacf(data.ix[1:, 'D.ln_wpi'], lags=40, ax=axes[1])
"""
Explanation: Thus the maximum likelihood estimates imply that for the process above, we have:
$$
\Delta y_t = 0.1050 + 0.8740 \Delta y_{t-1} - 0.4206 \epsilon_{t-1} + \epsilon_{t}
$$
where $\epsilon_{t} \sim N(0, 0.5226)$. Finally, recall that $c = (1 - \phi_1) \beta_0$, and here $c = 0.1050$ and $\phi_1 = 0.8740$. To compare with the output from Stata, we could calculate the mean:
$$\beta_0 = \frac{c}{1 - \phi_1} = \frac{0.1050}{1 - 0.8740} = 0.83$$
Note: these values are slightly different from the values in the Stata documentation because the optimizer here has found parameters here that yield a higher likelihood. Nonetheless, they are very close.
ARIMA Example 2: Arima with additive seasonal effects
This model is an extension of that from example 1. Here the data is assumed to follow the process:
$$
\Delta y_t = c + \phi_1 \Delta y_{t-1} + \theta_1 \epsilon_{t-1} + \theta_4 \epsilon_{t-4} + \epsilon_{t}
$$
The new part of this model is that there is allowed to be a annual seasonal effect (it is annual even though the periodicity is 4 because the dataset is quarterly). The second difference is that this model uses the log of the data rather than the level.
Before estimating the model, we look at graphs showing:
The time series (in logs)
The first difference of the time series (in logs)
The autocorrelation function
The partial autocorrelation function.
From the first two graphs, we note that the original time series does not appear to be stationary, whereas the first-difference does. This supports either estimating an ARMA model on the first-difference of the data, or estimating an ARIMA model with 1 order of integration (recall that we are taking the latter approach). The last two graphs support the use of an ARIMA(1,1,1) model.
End of explanation
"""
# Fit the model
mod = ssm.SARIMAX(data['ln_wpi'], trend='c', order=(1,1,(1,0,0,1)))
res = mod.fit(method='powell', disp=False)
print(res.summary())
"""
Explanation: To understand how to specify this model here, first recall that from example 1 we used the following code to specify the ARIMA(1,1,1) model:
python
mod = sm.tsa.statespace.SARIMAX(data['wpi'], trend='c', order=(1,1,1))
The order argument is a tuple of the form (AR specification, Integration order, MA specification). The integration order must be an integer (for example, here we assumed one order of integration, so it was specified as 1. In a pure ARMA model where the underlying data is already stationary, it would be 0).
For the AR specification and MA specification components, there are two possiblities. The first is to specify the maximum degree of the corresponding lag polynomial, in which case the component is an integer. For example, if we wanted to specify an ARIMA(1,1,4) process, we would use:
python
mod = sm.tsa.statespace.SARIMAX(data['wpi'], trend='c', order=(1,1,4))
and the corresponding data process would be:
$$
y_t = c + \phi_1 y_{t-1} + \theta_1 \epsilon_{t-1} + \theta_2 \epsilon_{t-2} + \theta_3 \epsilon_{t-3} + \theta_4 \epsilon_{t-4} + \epsilon_{t}
$$
or
$$
(1 - \phi_1 L)\Delta y_t = c + (1 + \theta_1 L + \theta_2 L^2 + \theta_3 L^3 + \theta_4 L^4) \epsilon_{t}
$$
When the specification parameter is given as a maximum degree of the lag polynomial, it implies that all polynomial terms up to that degree are included. Notice that this is not the model we want to use, because it would include terms for $\epsilon_{t-2}$ and $\epsilon_{t-3}$, which we don't want here.
What we want is a polynomial that has terms for the 1st and 4th degrees, but leaves out the 2nd and 3rd terms. To do that, we need to provide a tuple for the specification parameter, where the tuple describes the lag polynomial itself. In particular, here we would want to use:
python
ar = 1 # this is the maximum degree specification
ma = (1,0,0,1) # this is the lag polynomial specification
mod = sm.tsa.statespace.SARIMAX(data['wpi'], trend='c', order=(ar,1,ma))
This gives the following form for the process of the data:
$$
\Delta y_t = c + \phi_1 \Delta y_{t-1} + \theta_1 \epsilon_{t-1} + \theta_4 \epsilon_{t-4} + \epsilon_{t} \
(1 - \phi_1 L)\Delta y_t = c + (1 + \theta_1 L + \theta_4 L^4) \epsilon_{t}
$$
which is what we want.
End of explanation
"""
# Dataset
data = pd.read_stata('data/air2.dta')
data.index = pd.date_range(start=datetime(data.time[0], 1, 1), periods=len(data), freq='MS')
data['lnair'] = np.log(data['air'])
# Fit the model
mod = ssm.SARIMAX(data['lnair'], order=(2,1,0), seasonal_order=(1,1,0,12), simple_differencing=True)
res = mod.fit()
print(res.summary())
"""
Explanation: ARIMA Example 3: Airline Model
In the previous example, we included a seasonal effect in an additive way, meaning that we added a term allowing the process to depend on the 4th MA lag. It may be instead that we want to model a seasonal effect in a multiplicative way. We often write the model then as an ARIMA $(p,d,q) \times (P,D,Q)_s$, where the lowercase letters indicate the specification for the non-seasonal component, and the uppercase letters indicate the specification for the seasonal component; $s$ is the periodicity of the seasons (e.g. it is often 4 for quarterly data or 12 for monthly data). The data process can be written generically as:
$$
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D y_t = A(t) + \theta_q (L) \tilde \theta_Q (L^s) \epsilon_t
$$
where:
$\phi_p (L)$ is the non-seasonal autoregressive lag polynomial
$\tilde \phi_P (L^s)$ is the seasonal autoregressive lag polynomial
$\Delta^d \Delta_s^D y_t$ is the time series, differenced $d$ times, and seasonally differenced $D$ times.
$A(t)$ is the trend polynomial (including the intercept)
$\theta_q (L)$ is the non-seasonal moving average lag polynomial
$\tilde \theta_Q (L^s)$ is the seasonal moving average lag polynomial
sometimes we rewrite this as:
$$
\phi_p (L) \tilde \phi_P (L^s) y_t^* = A(t) + \theta_q (L) \tilde \theta_Q (L^s) \epsilon_t
$$
where $y_t^* = \Delta^d \Delta_s^D y_t$. This emphasizes that just as in the simple case, after we take differences (here both non-seasonal and seasonal) to make the data stationary, the resulting model is just an ARMA model.
As an example, consider the airline model ARIMA $(2,1,0) \times (1,1,0)_{12}$, with an intercept. The data process can be written in the form above as:
$$
(1 - \phi_1 L - \phi_2 L^2) (1 - \tilde \phi_1 L^{12}) \Delta \Delta_{12} y_t = c + \epsilon_t
$$
Here, we have:
$\phi_p (L) = (1 - \phi_1 L - \phi_2 L^2)$
$\tilde \phi_P (L^s) = (1 - \tilde \phi_1 L^{12})$
$d = 1, D = 1, s=12$ indicating that $y_t^*$ is derived from $y_t$ by taking first-differences and then taking 12-th differences.
$A(t) = c$ is the constant trend polynomial (i.e. just an intercept)
$\theta_q (L) = \tilde \theta_Q (L^s) = 1$ (i.e. there is no moving average effect)
It may still be confusing to see the two lag polynomials in front of the time-series variable, but notice that we can multiply the lag polynomials together to get the following model:
$$
(1 - \phi_1 L - \phi_2 L^2 - \tilde \phi_1 L^{12} + \phi_1 \tilde \phi_1 L^{13} + \phi_2 \tilde \phi_1 L^{14} ) y_t^* = c + \epsilon_t
$$
which can be rewritten as:
$$
y_t^* = c + \phi_1 y_{t-1}^* + \phi_2 y_{t-2}^* + \tilde \phi_1 y_{t-12}^* - \phi_1 \tilde \phi_1 y_{t-13}^* - \phi_2 \tilde \phi_1 y_{t-14}^* + \epsilon_t
$$
This is similar to the additively seasonal model from example 2, but the coefficients in front of the autoregressive lags are actually combinations of the underlying seasonal and non-seasonal parameters.
Specifying the model here is done simply by adding the seasonal_order argument, which accepts a tuple of the form (Seasonal AR specification, Seasonal Integration order, Seasonal MA, Seasonal periodicity). The seasonal AR and MA specifications, as before, can be expressed as a maximum polynomial degree or as the lag polynomial itself. Seasonal periodicity is an integer.
For the airline model ARIMA $(2,1,0) \times (1,1,0)_{12}$ with an intercept, the command is:
python
mod = sm.tsa.statespace.SARIMAX(data['lnair'], order=(2,1,0), seasonal_order=(1,1,0,12))
End of explanation
"""
# Dataset
data = pd.read_stata('data/friedman2.dta')
data.index = data.time
# Variables
endog = data.ix['1959':'1981', 'consump']
exog = sm.add_constant(data.ix['1959':'1981', 'm2'])
# Fit the model
mod = ssm.SARIMAX(endog, exog, order=(1,0,1))
res = mod.fit()
print(res.summary())
"""
Explanation: Notice that here we used an additional argument simple_differencing=True. This controls how the order of integration is handled in ARIMA models. If simple_differencing=True, then the time series provided as endog is literally differenced and an ARMA model is fit to the resulting new time series. This implies that a number of initial periods are lost to the differencing process; however, it may be necessary either to compare results to other packages (e.g. Stata's arima always uses simple differencing) or if the seasonal periodicity is large.
The default is simple_differencing=False, in which case the integration component is implemented as part of the state space formulation, and all of the original data can be used in estimation.
ARIMA Example 4: ARMAX (Friedman)
This model demonstrates the use of explanatory variables (the X part of ARMAX). When exogenous regressors are included, the SARIMAX module uses the concept of "regression with SARIMA errors" (see http://robjhyndman.com/hyndsight/arimax/ for details of regression with ARIMA errors versus alternative specifications), so that the model is specified as:
$$
y_t = \beta_t x_t + u_t \
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \epsilon_t
$$
Notice that the first equation is just a linear regression, and the second equation just describes the process followed by the error component as SARIMA (as was described in example 3). One reason for this specification is that the estimated parameters have their natural interpretations.
This specification nests many simpler specifications. For example, regression with AR(2) errors is:
$$
y_t = \beta_t x_t + u_t \
(1 - \phi_1 L - \phi_2 L^2) u_t = A(t) + \epsilon_t
$$
The model considered in this example is regression with ARMA(1,1) errors. The process is then written:
$$
\text{consump}_t = \beta_0 + \beta_1 \text{m2}_t + u_t \
(1 - \phi_1 L) u_t = (1 - \theta_1 L) \epsilon_t
$$
Notice that $\beta_0$ is, as described in example 1 above, not the same thing as an intercept specified by trend='c'. Whereas in the examples above we estimated the intercept of the model via the trend polynomial, here we demonstrate how to estimate $\beta_0$ itself by adding a constant to the exogenous dataset. In the output, $\beta_0$ is called const, whereas the intercept $c$ in the examples above was called intercept.
End of explanation
"""
# Dataset
raw = pd.read_stata('data/friedman2.dta')
raw.index = raw.time
data = raw.ix[:'1981']
# Variables
endog = data.ix['1959':, 'consump']
exog = sm.add_constant(data.ix['1959':, 'm2'])
nobs = endog.shape[0]
# Fit the model
mod = ssm.SARIMAX(endog.ix[:'1978-01-01'], exog=exog.ix[:'1978-01-01'], order=(1,0,1))
fit_res = mod.fit()
print(fit_res.summary())
"""
Explanation: ARIMA Postestimation: Example 1 - Dynamic Forecasting
Here we describe some of the post-estimation capabilities of SARIMAX.
First, using the model from example, we estimate the parameters using data that excludes the last few observations (this is a little artificial as an example, but it allows considering performance of out-of-sample forecasting and facilitates comparison to Stata's documentation).
End of explanation
"""
mod = sm.tsa.statespace.SARIMAX(endog, exog=exog, order=(1,0,1))
res = mod.filter(fit_res.params)
"""
Explanation: Next, we want to get results for the full dataset but using the estimated parameters (on a subset of the data).
End of explanation
"""
# In-sample one-step-ahead predictions
predict = res.get_prediction()
predict_ci = predict.conf_int()
"""
Explanation: The get_prediction command is first applied here to get in-sample predictions. Unlike predict, whose default output is just the predicted values, get_prediction also allows us to calculate confidence intervals.
With no other arguments, it returns the one-step-ahead in-sample predictions for the entire sample.
End of explanation
"""
# Dynamic predictions
predict_dy = res.get_prediction(dynamic='1978-01-01')
predict_dy_ci = predict_dy.conf_int()
"""
Explanation: We can also get dynamic predictions. One-step-ahead prediction uses the true values of the endogenous values at each step to predict the next in-sample value. Dynamic predictions use one-step-ahead prediction up to some point in the dataset (specified by the dynamic argument); after that, the previous predicted endogenous values are used in place of the true endogenous values for each new predicted element.
The dynamic argument is specified to be an offset relative to the start argument. If start is not specified, it is assumed to be 0.
Here we perform dynamic prediction starting in the first quarter of 1978.
End of explanation
"""
# Graph
fig, ax = plt.subplots(figsize=(9,4))
npre = 4
ax.set(title='Personal consumption', xlabel='Date', ylabel='Billions of dollars')
# Plot data points
data.ix['1977-07-01':, 'consump'].plot(ax=ax, style='o', label='Observed')
# Plot predictions
predict.predicted_mean.ix['1977-07-01':].plot(ax=ax, style='r--', label='One-step-ahead forecast')
ci = predict_ci.ix['1977-07-01':]
ax.fill_between(ci.index, ci.ix[:,0], ci.ix[:,1], color='r', alpha=0.1)
predict_dy.predicted_mean.ix['1977-07-01':].plot(ax=ax, style='g', label='Dynamic forecast (1978)')
ci = predict_dy_ci.ix['1977-07-01':]
ax.fill_between(ci.index, ci.ix[:,0], ci.ix[:,1], color='g', alpha=0.1)
legend = ax.legend(loc='lower right')
"""
Explanation: We can graph the one-step-ahead and dynamic predictions (and the corresponding confidence intervals) to see their relative performance. Notice that up to the point where dynamic prediction begins (1978:Q1), the two are the same.
End of explanation
"""
# Prediction error
# Graph
fig, ax = plt.subplots(figsize=(9,4))
npre = 4
ax.set(title='Forecast error', xlabel='Date', ylabel='Forecast - Actual')
# In-sample one-step-ahead predictions and 95% confidence intervals
predict_error = predict.predicted_mean - endog
predict_error.ix['1977-10-01':].plot(ax=ax, label='One-step-ahead forecast')
ci = predict_ci.ix['1977-10-01':].copy()
ci.iloc[:,0] -= endog.loc['1977-10-01':]
ci.iloc[:,1] -= endog.loc['1977-10-01':]
ax.fill_between(ci.index, ci.ix[:,0], ci.ix[:,1], alpha=0.1)
# Dynamic predictions and 95% confidence intervals
predict_dy_error = predict_dy.predicted_mean - endog
predict_dy_error.ix['1977-10-01':].plot(ax=ax, style='r', label='Dynamic forecast (1978)')
ci = predict_dy_ci.ix['1977-10-01':].copy()
ci.iloc[:,0] -= endog.loc['1977-10-01':]
ci.iloc[:,1] -= endog.loc['1977-10-01':]
ax.fill_between(ci.index, ci.ix[:,0], ci.ix[:,1], color='r', alpha=0.1)
legend = ax.legend(loc='lower left');
legend.get_frame().set_facecolor('w')
"""
Explanation: Finally, graph the prediction error. It is obvious that, as one would suspect, one-step-ahead prediction is considerably better.
End of explanation
"""
|
ethen8181/machine-learning | model_selection/prob_calibration/deeplearning_prob_calibration.ipynb | mit | import os
# path : store the current path to convert back to it later
path = os.getcwd()
os.chdir(os.path.join('..', '..', 'notebook_format'))
from formats import load_style
load_style(css_style='custom2.css', plot_style=False)
# 1. magic for inline plot
# 2. magic to print version
# 3. magic so that the notebook will reload external python modules
# 4. magic to enable retina (high resolution) plots
# https://gist.github.com/minrk/3301035
%matplotlib inline
%load_ext watermark
%load_ext autoreload
%autoreload 2
%config InlineBackend.figure_format='retina'
import os
import time
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import trange
from torch import optim
from torch.utils.data import DataLoader
from datasets import load_dataset, DatasetDict, Dataset
from transformers import (
AutoTokenizer,
AutoModelForSequenceClassification,
TrainingArguments,
Trainer,
DataCollatorWithPadding
)
%watermark -a 'Ethen' -d -t -v -p datasets,transformers,torch,tokenizers,numpy,pandas,matplotlib
"""
Explanation: <h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Deep-Learning-Model-Calibration-with-Temperature-Scaling" data-toc-modified-id="Deep-Learning-Model-Calibration-with-Temperature-Scaling-1"><span class="toc-item-num">1 </span>Deep Learning Model Calibration with Temperature Scaling</a></span><ul class="toc-item"><li><span><a href="#Tokenizer" data-toc-modified-id="Tokenizer-1.1"><span class="toc-item-num">1.1 </span>Tokenizer</a></span></li><li><span><a href="#Model-FineTuning" data-toc-modified-id="Model-FineTuning-1.2"><span class="toc-item-num">1.2 </span>Model FineTuning</a></span></li><li><span><a href="#Model-Calibration" data-toc-modified-id="Model-Calibration-1.3"><span class="toc-item-num">1.3 </span>Model Calibration</a></span><ul class="toc-item"><li><span><a href="#Temperature-Scaling" data-toc-modified-id="Temperature-Scaling-1.3.1"><span class="toc-item-num">1.3.1 </span>Temperature Scaling</a></span></li></ul></li></ul></li><li><span><a href="#Reference" data-toc-modified-id="Reference-2"><span class="toc-item-num">2 </span>Reference</a></span></li></ul></div>
End of explanation
"""
dataset_dict = load_dataset("quora")
dataset_dict
dataset_dict['train'][0]
test_size = 0.1
val_size = 0.1
dataset_dict_test = dataset_dict['train'].train_test_split(test_size=test_size)
dataset_dict_train_val = dataset_dict_test['train'].train_test_split(test_size=val_size)
dataset_dict = DatasetDict({
"train": dataset_dict_train_val["train"],
"val": dataset_dict_train_val["test"],
"test": dataset_dict_test["test"]
})
dataset_dict
"""
Explanation: Deep Learning Model Calibration with Temperature Scaling
In this article, we'll be going over two main things:
Process of finetuning a pre-trained BERT model on a text classification task, more specifically, the Quora Question Pairs challenge.
Process of evaluating model calibration and improving upon the calibration error using temperature scaling.
Finetuning pre-trained models on downstream tasks has become increasingly popular, and this notebook documents some findings on these models' calibration. Calibration in this context means whether the model's predicted score reflects the true probability. If the reader is not familiar with model calibration 101, there is a separate notebook that covers this topic. Reading up till the "Measuring Calibration" section should suffice.
End of explanation
"""
# https://huggingface.co/transformers/model_doc/mobilebert.html
pretrained_model_name_or_path = "google/mobilebert-uncased"
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path)
tokenizer
"""
Explanation: Tokenizer
We won't be going over the details of the pre-trained tokenizer or model and only load a pre-trained one available from the huggingface model repository.
End of explanation
"""
encoded_input = tokenizer(
'What is the step by step guide to invest in share market in india?',
'What is the step by step guide to invest in share market?'
)
encoded_input
"""
Explanation: We can feed our tokenizer directly with a pair of sentences.
End of explanation
"""
tokenizer.decode(encoded_input["input_ids"])
"""
Explanation: Decoding the tokenized inputs, we can see that this model's tokenizer adds special tokens such as [SEP], which are used to indicate which token belongs to which segment/pair.
End of explanation
"""
def tokenize_fn(examples):
labels = [int(label) for label in examples['is_duplicate']]
texts = [question['text'] for question in examples['questions']]
texts1 = [text[0] for text in texts]
texts2 = [text[1] for text in texts]
tokenized_examples = tokenizer(texts1, texts2)
tokenized_examples['labels'] = labels
return tokenized_examples
dataset_dict_tokenized = dataset_dict.map(
tokenize_fn,
batched=True,
num_proc=8,
remove_columns=['is_duplicate', 'questions']
)
dataset_dict_tokenized
dataset_dict_tokenized['train'][0]
"""
Explanation: The preprocessing step will be task specific; if we happen to be using another dataset, this function needs to be modified accordingly.
End of explanation
"""
model_checkpoint = 'text_classification'
num_labels = 2
# we'll save the model after fine tuning it once, so we can skip the fine tuning part during
# the second round if we detect that we already have one available
if os.path.isdir(model_checkpoint):
model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint)
else:
model = AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path, num_labels=num_labels)
print('# of parameters: ', model.num_parameters())
model
data_collator = DataCollatorWithPadding(tokenizer, padding=True)
data_collator
"""
Explanation: Model FineTuning
Having preprocessed our raw dataset, we use the AutoModelForSequenceClassification class to load the pre-trained model for our text classification task; the only other argument we need to specify is the number of classes/labels the task has. Upon instantiating the model for the first time, we'll see some warnings telling us we should fine tune this model on our downstream task before using it.
End of explanation
"""
batch_size = 64
args = TrainingArguments(
"quora",
evaluation_strategy="epoch",
learning_rate=1e-4,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
num_train_epochs=2,
weight_decay=0.01,
load_best_model_at_end=True
)
trainer = Trainer(
model,
args,
data_collator=data_collator,
train_dataset=dataset_dict_tokenized["train"],
eval_dataset=dataset_dict_tokenized['val']
)
if not os.path.isdir(model_checkpoint):
trainer.train()
model.save_pretrained(model_checkpoint)
class SoftmaxModule(nn.Module):
"""
Add a softmax layer on top the base model. Note this does not necessarily
mean the output score is a well-calibrated probability.
"""
def __init__(self, model_path: str):
super().__init__()
self.model_path = model_path
self.model = AutoModelForSequenceClassification.from_pretrained(model_path)
def forward(self, input_ids, attention_mask, token_type_ids):
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids
)
scores = nn.functional.softmax(outputs.logits, dim=-1)[:, 1]
return scores
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
softmax_module = SoftmaxModule(model_checkpoint).to(device)
softmax_module.eval()
softmax_module
"""
Explanation: We can perform all sorts of hyper parameter tuning on the fine tuning step; here we'll pick some default parameters for illustration purposes.
End of explanation
"""
def predict(model, examples, round_digits: int = 5):
input_ids = examples['input_ids'].to(device)
attention_mask = examples['attention_mask'].to(device)
token_type_ids = examples['token_type_ids'].to(device)
batch_labels = examples['labels'].detach().cpu().numpy().tolist()
model.eval()
with torch.no_grad():
batch_output = model(input_ids, attention_mask, token_type_ids)
batch_scores = np.round(batch_output.detach().cpu().numpy(), round_digits).tolist()
return batch_scores, batch_labels
def predict_data_loader(model, data_loader: DataLoader) -> pd.DataFrame:
scores = []
labels = []
for examples in data_loader:
batch_scores, batch_labels = predict(model, examples)
scores += batch_scores
labels += batch_labels
df_predictions = pd.DataFrame.from_dict({'scores': scores, 'labels': labels})
return df_predictions
data_collator = DataCollatorWithPadding(tokenizer, padding=True)
data_loader = DataLoader(dataset_dict_tokenized['test'], collate_fn=data_collator, batch_size=128)
start = time.time()
df_predictions = predict_data_loader(softmax_module, data_loader)
end = time.time()
print('elapsed: ', end - start)
print(df_predictions.shape)
df_predictions.head()
"""
Explanation: We define some helper functions to generate predictions for our dataset and store the predicted scores and labels in a pandas DataFrame.
End of explanation
"""
class TemperatureScalingCalibrationModule(nn.Module):
def __init__(self, model_path: str):
super().__init__()
self.model_path = model_path
self.model = AutoModelForSequenceClassification.from_pretrained(model_path)
# the single temperature scaling parameter, the initialization value doesn't
# seem to matter that much based on some ad-hoc experimentation
self.temperature = nn.Parameter(torch.ones(1))
def forward(self, input_ids, attention_mask, token_type_ids):
"""forward method that returns softmax-ed confidence scores."""
outputs = self.forward_logit(input_ids, attention_mask, token_type_ids)
scores = nn.functional.softmax(outputs, dim=-1)[:, 1]
return scores
def forward_logit(self, input_ids, attention_mask, token_type_ids):
"""forward method that returns logits, to be used with cross entropy loss."""
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids
).logits
return outputs / self.temperature
def fit(self, dataset_tokenized, n_epochs: int = 3, batch_size: int = 64, lr: float = 0.01):
"""fits the temperature scaling parameter."""
data_collator = DataCollatorWithPadding(tokenizer, padding=True)
data_loader = DataLoader(dataset_tokenized, collate_fn=data_collator, batch_size=batch_size)
self.freeze_base_model()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(self.parameters(), lr=lr)
for epoch in trange(n_epochs):
for examples in data_loader:
labels = examples['labels'].long().to(device)
input_ids = examples['input_ids'].to(device)
attention_mask = examples['attention_mask'].to(device)
token_type_ids = examples['token_type_ids'].to(device)
# standard step to perform the forward and backward step
self.zero_grad()
predict_proba = self.forward_logit(input_ids, attention_mask, token_type_ids)
loss = criterion(predict_proba, labels)
loss.backward()
optimizer.step()
return self
def freeze_base_model(self):
"""remember to freeze base model's parameters when training temperature scaler"""
self.model.eval()
for parameter in self.model.parameters():
parameter.requires_grad = False
return self
calibration_module = TemperatureScalingCalibrationModule(model_checkpoint).to(device)
calibration_module
calibration_module.fit(dataset_dict_tokenized['val'])
calibration_module.temperature
data_collator = DataCollatorWithPadding(tokenizer, padding=True)
data_loader = DataLoader(dataset_dict_tokenized['test'], collate_fn=data_collator, batch_size=128)
start = time.time()
df_calibrated_predictions = predict_data_loader(calibration_module, data_loader)
end = time.time()
print('elapsed: ', end - start)
print(df_calibrated_predictions.shape)
df_calibrated_predictions.head()
"""
Explanation: Model Calibration
Temperature Scaling
Temperature Scaling is a post-processing technique that was proposed to improve upon the calibration error, but specifically designed for deep learning. It works by dividing the logits (output of the layer right before the final softmax layer) by a learned scalar parameter.
\begin{align}
\text{softmax}(z)_i = \frac{e^{z_i / T}}{\sum_j e^{z_j / T}}
\end{align}
where $z$ is the logit vector and $T$ is the learned temperature scaling parameter. We learn this parameter on a validation set, where $T$ is chosen to minimize negative log likelihood. As we can imagine, with $T > 1$, it lowers the predicted score across all classes, making the model less confident about its predictions, but it does not change the model's predicted maximum class.
The benefit of this approach is mainly two folds:
Unlike a lot of post processing calibration technique, temperature scaling can be directly embedded into our deep learning module as a single additional parameter. We can export the model as is using standard serialization techniques for that specific deep learning library and perform inferencing at run time without introducing additional dependencies.
It has been shown to provide potent calibration performance when compared to other post processing calibration techniques by the original paper.
End of explanation
"""
from calibration_module.utils import compute_calibration_summary
eval_dict = {
f'{model_checkpoint}': df_predictions,
f'{model_checkpoint}_calibrated': df_calibrated_predictions
}
# change default style figure and font size
plt.rcParams['figure.figsize'] = 12, 12
plt.rcParams['font.size'] = 12
n_bins = 20
df_result = compute_calibration_summary(eval_dict, label_col='labels', score_col='scores', n_bins=n_bins)
df_result
"""
Explanation: From the plot below, we can see that our predicted scores on the dataset are concentrated on the higher end. From the calibration plot, it seems like the original predicted score is already pretty well calibrated, and with temperature scaling we were able to improve upon the calibration metrics even further. Looking at the final value of the trained temperature scaling parameter, a value larger than 1 indicates that it is indeed shrinking the predicted score to make the model less confident in its predictions.
End of explanation
"""
|
Murali-group/PathLinker-Cytoscape | cytoscape-automation-example/simple_use_case.ipynb | gpl-3.0 | # necessary libraries and dependencies
import sys
from py2cytoscape.data.cyrest_client import CyRestClient
from py2cytoscape.data.style import StyleUtil
import networkx as nx
import pandas as pd
import json
import requests
print("python version: " + sys.version)
# The py2cytoscape module doesn't have a version. I installed it 2018-04-13
#print("py2cytoscape version: " + py2cytoscape.__version__)
print("networkx version: " + nx.__version__)
print("pandas version: " + pd.__version__)
print("requests version: " + requests.__version__)
# !!!!!!!!!!!!!!!!! Step 0: Start Cytoscape 3.6 with cyREST App !!!!!!!!!!!!!!!!!!!!!!!!!!
# Cytoscape must be running to use the automation features
# Step 1: create an instance of cyRest client
cy = CyRestClient()
# Reset the session
#cy.session.delete()
"""
Explanation: Use Case for "Automating the PathLinker App for Cytoscape" - F1000, 2018
<img src="http://apps.cytoscape.org/media/pathlinker/logo.png.png" alt="PathLinker Logo">
Links
PathLinker Cytoscape App documentation
py2cytoscape installation instructions
Full py2cytoscape workflows can be found in the cytoscape automation repo
Requirements
Java 8
Cytoscape 3.6.0+
cyREST 3.6.0+
PathLinker App 1.4.1+
py2cytoscape 0.4.2+
End of explanation
"""
# Step 2: Import/Create the network that PathLinker will run on
network_file = 'network-example-undirected.txt'
# create a new network by importing the data from a sample using pandas
df = pd.read_csv(network_file, sep='\t', lineterminator='\n')
# and create the networkx Graph from the pandas dataframe
G = nx.from_pandas_edgelist(df, "source", "target")
# create the CyNetwork object from the networkx in CytoScape
cy_network = cy.network.create_from_networkx(G, name = 'network-example-undirected', collection = 'F1000 PathLinker Use Case')
# obtain the CyNetwork object SUID
cy_network_suid = cy_network.get_id()
# give the network some style and a layout
my_style = cy.style.create('default')
# copied from here: https://github.com/cytoscape/cytoscape-automation/blob/master/for-scripters/Python/basic-fundamentals.ipynb
basic_settings = {
'NODE_FILL_COLOR': '#6AACB8',
'NODE_SIZE': 55,
'NODE_BORDER_WIDTH': 0,
'NODE_LABEL_COLOR': '#555555',
'EDGE_WIDTH': 2,
'EDGE_TRANSPARENCY': 100,
'EDGE_STROKE_UNSELECTED_PAINT': '#333333',
'NETWORK_BACKGROUND_PAINT': '#FFFFEA'
}
my_style.update_defaults(basic_settings)
# Create some mappings
my_style.create_passthrough_mapping(column='name', vp='NODE_LABEL', col_type='String')
cy.layout.apply(name="force-directed", network=cy_network)
cy.style.apply(my_style, cy_network)
#cy.layout.fit(network=cy_network)
"""
Explanation: Create network using networkx
This example uses the small and simple network found here: network-example-undirected.txt. <br>
End of explanation
"""
# Step 3: Construct input data to pass to PathLinker API function
# construct PathLinker input data for API request
# For a description of all of the parameters, please see below
params = {
'sources': 'a',
'targets': 'e h',
'k': 2, # the number of shortest path to compute
'treatNetworkAsUndirected': True, # Our graph is undirected, so use this option
'includeTiedPaths': True, # This option is not necessary. I'm including it here just to show what it does
}
# construct REST API request url
url = "http://localhost:1234/pathlinker/v1/" + str(cy_network_suid) + "/run"
# to just run on the network currently in view on cytoscape, use the following:
# url = "http://localhost:1234/pathlinker/v1/currentView/run"
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
# perform the REST API call
result_json = requests.request("POST",
url,
data = json.dumps(params),
params = None,
headers = headers)
# ------------ Description of all parameters ------------------
# the node names for the sources and targets are space separated
# and must match the "name" column in the Node Table in Cytoscape
params["sources"] = "a"
params["targets"] = "e h"
# the number of shortest path to compute, must be greater than 0
# Default: 50
params["k"] = 2
# Edge weight type, must be one of the three: [UNWEIGHTED, ADDITIVE, PROBABILITIES]
params["edgeWeightType"] = "UNWEIGHTED"
# Edge penalty. Not needed for UNWEIGHTED
# Must be 0 or greater for ADDITIVE, and 1 or greater for PROBABILITIES
params["edgePenalty"] = 0
# The column name in the Edge Table in Cytoscape containing edge weight property,
# column type must be numerical type
params["edgeWeightColumnName"] = "weight"
# The option to ignore directionality of edges when computing paths
# Default: False
params["treatNetworkAsUndirected"] = True
# Allow source/target nodes to appear as intermediate nodes in computed paths
# Default: False
params["allowSourcesTargetsInPaths"] = False
# Include more than k paths if the path length/score is equal to kth path length/score
# Default: False
params["includeTiedPaths"] = False
# Option to disable the generation of the subnetwork/view, path rank column, and result panel
# and only return the path result in JSON format
# Default: False
params["skipSubnetworkGeneration"] = False
"""
Explanation: The network shown below will be generated in Cytoscape with the above code.
Run PathLinker using the API function
Run PathLinker
The function takes user sources, targets, and a set of parameters, and computes the k shortest paths. The function returns the paths in JSON format. Based on the user input, the function could generate a subnetwork (and view) containing those paths, and returns the computed paths and subnetwork/view SUIDs.
Additional descriptions of the parameters are available in the PathLinker app documentation.
End of explanation
"""
# Step 4: Store result, parse, and print
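# A small defensive check (added as an illustrative sketch): requests' Response objects
# expose status_code and raise_for_status(), which raises an HTTPError for 4xx/5xx
# responses, so we fail early if the PathLinker call did not succeed.
print("HTTP status:", result_json.status_code)
result_json.raise_for_status()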
results = json.loads(result_json.content)
print("Output:\n")
# access the suid, references, and path rank column name
subnetwork_suid = results["subnetworkSUID"]
subnetwork_view_suid = results["subnetworkViewSUID"]
# The path rank column shows for each edge, the rank of the first path in which it appears
path_rank_column_name = results["pathRankColumnName"]
print("subnetwork SUID: %s" % (subnetwork_suid))
print("subnetwork view SUID: %s" % (subnetwork_view_suid))
print("Path rank column name: %s" % (path_rank_column_name))
print("")
# access the paths generated by PathLinker
paths = results["paths"]
# print the paths found
for path in paths:
print("path rank: %d" % (path['rank']))
print("path score: %s" % (str(path['score'])))
print("path: %s" % ("|".join(path['nodeList'])))
# write them to a file
paths_file = "use-case-images/paths.txt"
print("Writing paths to %s" % (paths_file))
with open(paths_file, 'w') as out:
out.write("path rank\tpath score\tpath\n")
for path in paths:
out.write('%d\t%s\t%s\n' % (path['rank'], str(path['score']), "|".join(path['nodeList'])))
# access network and network view references
subnetwork = cy.network.create(suid=subnetwork_suid)
#subnetwork_view = subnetwork.get_first_view()
# TODO copy the layout of the original graph to this graph to better visualize the results.
# The copycat layout doesn't seem to be working
# for now, just apply the cose layout to get a little better layout (see image below)
cy.layout.apply(name="cose", network=subnetwork)
"""
Explanation: Output
The app will generate the following (shown below):
- a subnetwork containing the paths (with the hierarchical layout applied)
- a path rank column in the Edge Table (shows for each edge, the rank of the first path in which it appears)
- a Result Panel within Cytoscape.
The API will return:
- the computed paths
- the SUIDs of the generated subnetwork and subnetwork view
- the path rank column name in JSON format.
End of explanation
"""
# *** Currently the function does not work therefore is commented out. ***
# import py2cytoscape.cytoscapejs as renderer
# # visualize the subnetwork view using CytoScape.js
# renderer.render(subnetwork_view, 'Directed', background='radial-gradient(#FFFFFF 15%, #DDDDDD 105%)')
"""
Explanation: The subnetwork with "cose" layout will look something like this:
Visualization using cytoscape.js and py2cytoscape
End of explanation
"""
# png
subnetwork_image_png = subnetwork.get_png()
subnetwork_image_file = 'use-case-images/subnetwork-image.png'
print("Writing PNG to %s" % (subnetwork_image_file))
with open(subnetwork_image_file, 'wb') as f:
f.write(subnetwork_image_png)
from IPython.display import Image
Image(subnetwork_image_png)
# # pdf
# subnetwork_image_pdf = subnetwork.get_pdf()
# subnetwork_image_file = subnetwork_image_file.replace('.png', '.pdf')
# print("Writing PDF to %s" % (subnetwork_image_file))
# with open(subnetwork_image_file, 'wb') as f:
# f.write(subnetwork_image_pdf)
# # display the pdf in frame
# from IPython.display import IFrame
# IFrame('use_case_images/subnetwork_image.pdf', width=600, height=300)
# # svg
# subnetwork_image_svg = subnetwork.get_svg()
# from IPython.display import SVG
# SVG(subnetwork_image_svg)
"""
Explanation: View the subnetwork and store the image
End of explanation
"""
|
mayank-johri/LearnSeleniumUsingPython | Section 2 - Advance Python/Chapter S2.01 - Functional Programming/01.01_Functions_as_First_Class_citizens.ipynb | gpl-3.0 | a = 10
def test_function():
pass
print(id(a), dir(a))
print(id(test_function), dir(test_function))
"""
Explanation: Functions as First-Class citizens
In functional programming, functions can be treated as objects. That is, they can be assigned to a variable, passed as arguments, or even returned from other functions.
End of explanation
"""
# Example lambda keyword
product_func = lambda x, y: x*y
print(product_func(10, 20))
print(product_func(10, 2))
concat = lambda x, y: [x, y]
print(concat([1,2,3], 4))
"""
Explanation: The lambda
The simplest way to initialize a pure function in Python is by using the lambda keyword, which helps in defining a one-line function. Functions initialized with lambda are often called anonymous functions.
End of explanation
"""
def square(x):
"""
This returns the square of the requested number `x`
"""
return x**2
print(square(10))
print(square(100))
# Assignation to another variable
mySquare = square
print(mySquare(100))
print(square)
print(mySquare)
print(id(square))
print(id(mySquare))
# attributes present
print("*"*30)
print(dir(square))
print("*"*30)
print(mySquare.__name__)
print("*"*30)
print(square.__code__)
print("*"*30)
print(square.__doc__)
"""
Explanation: Functions as Objects
Functions are first-class objects in Python, meaning they have attributes and can be referenced and assigned to variables.
End of explanation
"""
square.d = 10
print(dir(square))
"""
Explanation: Adding attributes to a function
End of explanation
"""
print(square(square(square(2))))
product_func = lambda x, y: x*y
sum_func = lambda F, m: lambda x, y: F(x, y)+m
print(sum_func(product_func, 5)(2, 4))
print(sum_func)
print(sum_func(product_func, 5))
print(sum_func(product_func, 5)(3, 5))
"""
Explanation: higher-order function
Python also supports higher-order functions, meaning that functions can accept other functions as arguments and return functions to the caller.
End of explanation
"""
def outer(a):
"""
Outer function
"""
y = 0
def inner(x):
"""
inner function
"""
y = x*x*a
return(y)
print(a)
return inner
my_out = outer
my_out(102)
o = outer(10)
b = outer(20)
print("*"*20)
print(b)
print(o)
print("*"*20)
print(o(10))
print(b(10))
def outer():
"""
Outer function
"""
if 'a' in locals():
a +=10
else:
print("~"),
a = 20
def inner(x):
"""
inner function
"""
return(x*x*a)
print(a)
return inner
# oo = outer
# print(oo.__doc__)
o = outer()
print("*"*20)
print(o)
print(o(10))
print(o.__doc__)
b = outer()
print(b)
print(b(30))
print(b.__doc__)
x = 0
def outer():
x = 1
def inner():
x = 2
print("inner:", x)
inner()
print("outer:", x)
outer()
print("global:", x)
"""
Explanation: 13=2*4+5
F -> product_func
m => 5
x -> 2
y -> 4
2*4+5 = 8+5 = 13
In the above example, sum_func is a higher-order function that takes two inputs - a function F and a constant m.
Nested Functions
In Python, functions can also be defined within the scope of another function. When this type of function definition is used, the inner function is only in scope inside the outer function, so it is most often useful when the inner function is being returned (moving it to the outer scope) or when it is being passed into another function.
Notice that in the example, a new instance of the function inner() is created on each call to outer(). That is because it is defined during the execution of outer(). The creation of the second instance has no impact on the first.
End of explanation
"""
x = 0
def outer():
x = 1
def inner():
nonlocal x
x = 2
print("inner:",x, "id:", id(x))
inner()
print("outer:",x, "id:", id(x))
outer()
print("global:",x, "id:", id(x))
def outer(a):
"""
Outer function
"""
y = 1
def inner(x):
"""
inner function
"""
nonlocal y
print(y)
y = x*x*a
return("y =" + str(y))
print(a)
return inner
o = outer(10)
b = outer(20)
print("*"*20)
print(o)
print(o(10))
print("*"*20)
print(b)
print(b(10))
"""
Explanation: Problem with local and global
Let's take the above example: we have two functions, outer & inner. We also have a variable x which is present as a global and is also present in both functions.
If we want to access the x of the outer function from the inner function, the global keyword does not help. Fortunately, Python provides the keyword nonlocal, which allows inner functions to access variables of the enclosing (outer) function, as the example shows.
The details of nonlocal are described in https://www.python.org/dev/peps/pep-3104/
End of explanation
"""
# Encapsulation
def increment(current):
def inner_increment(x): # hidden from outer code
return x + 1
next_number = inner_increment(current)
return [current, next_number]
print(increment(10))
"""
Explanation: Inner / Nested Functions - When to use
Encapsulation
You can use inner functions to protect helper logic from anything happening outside of the enclosing function, meaning that they are hidden from the global scope.
End of explanation
"""
try:
increment.inner_increment(109)
except Exception as e:
print(e)
"""
Explanation: NOTE: We cannot access the inner function directly, as the example shows.
End of explanation
"""
# Keepin’ it DRY
def process(file_name):
def do_stuff(file_process):
for line in file_process:
print(line)
if isinstance(file_name, str):
with open(file_name, 'r') as f:
do_stuff(f)
else:
do_stuff(file_name)
process(["test", "test3", "t33"])
# process("test.txt")
"""
Explanation: Following DRY (Don't Repeat Yourself)
This technique can be used if you have a section of code that is repeated in numerous places within a function. For example, you might write a function which processes a file, and you want to accept either an open file object or a file name:
End of explanation
"""
def square(n):
return n**2
def cube(n):
return n**3
print(square(2))
def sqr(a, b):
return a**b
"""
Explanation: or when you have similar logic that can be replaced by a single function, such as mathematical functions, or code that can be combined ("clubbed") together by using some parameters.
End of explanation
"""
def test():
print("TEST TEST TEST")
def yes(name):
print("Ja, ", name)
return True
return yes
d = test()
print("*" * 14)
a = d("Murthy")
print("*" * 14)
print(a)
def power(exp):
def subfunc(a):
return a**exp
return subfunc
square = power(2)
hexa = power(6)
print(square)
print(hexa)
print(square(5)) # 5**2
print()
print(hexa(3)) # 3**6
print(power(6)(3))
# subfunc(3) where exp = 6
# SQuare
# exp -> 2
# Square(5)
# a -> 5
# 5**2
# 25
# Power(6)(3, x)  # this would raise a NameError: `Power` and `x` are not defined (and subfunc takes a single argument)
def a1(m):
x = m * 2
def b(v, t=None):
if t:
print(x, m, t)
return v + t
else:
print(x, m, v)
return v + x
return b
n = a1(2)
print(n(3))
print(n(3, 10))
def f1(a):
def f2(b):
return f2
def f3(c):
return f3
def f4(d):
return f4
def f5(e):
return f5
print (f1(1)(2)(3)(4)(5))
def f1(a):
def f2(b):
def f3(c):
def f4(d):
def f5(e):
print(e)
return f5
return f4
return f3
return f2
f1(1)(2)(3)(4)(5)
"""
Explanation: A few more examples of functions returning functions: test() returns its inner function yes, power(exp) returns subfunc (which remembers exp), a1(m) returns b (which remembers both m and x), and the two f1 variants show how a chained call like f1(1)(2)(3)(4)(5) walks through the nested functions.
End of explanation
"""
def f(x):
def g(y):
return x + y
return g
def h(x):
return lambda y: x + y
a = f(1)
b = h(1)
print(a, b)
print(a(5), b(5))
print(f(1)(5), h(1)(5))
"""
Explanation: Closures & Factory Functions <sup>1</sup>
They are techniques for implementing lexically scoped name binding with first-class functions. A closure is a record storing a function together with an environment: a mapping associating each free variable of the function (variables that are used locally, but defined in an enclosing scope) with the value or reference to which the name was bound when the closure was created.
A closure—unlike a plain function—allows the function to access those captured variables through the closure's copies of their values or references, even when the function is invoked outside their scope.
End of explanation
"""
def make_adder(x):
def add(y):
return x + y
return add
plus10 = make_adder(10)
print(plus10(12)) # make_adder(10).add(12)
print(make_adder(10)(12))
"""
Explanation: both a and b are closures—or rather, variables with a closure as value—in both cases produced by returning a nested function with a free variable from an enclosing function, so that the free variable binds to the parameter x of the enclosing function. However, in the first case the nested function has a name, g, while in the second case the nested function is anonymous. The closures need not be assigned to a variable, and can be used directly, as in the last lines—the original name (if any) used in defining them is irrelevant. This usage may be deemed an "anonymous closure".
Closures can avoid the use of global values and provide some form of data hiding. They can also provide an object-oriented solution to a problem: when there are only a few methods to implement (one method in most cases), closures can provide an alternate and more elegant solution, but when the number of attributes and methods grows larger, it is better to implement a class.
1: Copied from : "https://en.wikipedia.org/wiki/Closure_(computer_programming)"
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/awi/cmip6/models/sandbox-3/atmoschem.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'awi', 'sandbox-3', 'atmoschem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era: CMIP6
Institute: AWI
Source ID: SANDBOX-3
Topic: Atmoschem
Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry.
Properties: 84 (39 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:38
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmospheric chemistry model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmospheric chemistry model code.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Chemistry Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Form of prognostic variables in the atmospheric chemistry component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of advected tracers in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry calculations (not advection) generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.8. Coupling With Chemical Reactivity
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry transport scheme turbulence is couple with chemical reactivity?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the evolution of a given variable
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemical species advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Split Operator Chemistry Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemistry (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.5. Split Operator Alternate Order
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Whether the order of the split operator calls is alternated (e.g. reversed on successive timesteps).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.6. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the atmospheric chemistry model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.7. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Timestep Framework --> Split Operator Order
4.1. Turbulence
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.2. Convection
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Precipitation
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.4. Emissions
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.5. Deposition
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.6. Gas Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.9. Photo Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.10. Aerosols
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the atmospheric chemistry grid
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmospheric chemistry grid match the atmosphere grid?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 7.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview of transport implementation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Use Atmospheric Transport
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is transport handled by the atmosphere, rather than within the atmospheric chemistry component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Transport Details
Is Required: FALSE Type: STRING Cardinality: 0.1
If transport is handled within the atmospheric chemistry scheme, describe it.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric chemistry emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Emissions Concentrations --> Surface Emissions
10.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via any other method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Gas Phase Chemistry
Atmospheric chemistry gas phase chemistry
13.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview gas phase atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Species included in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.3. Number Of Bimolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of bi-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.4. Number Of Termolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of ter-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.7. Number Of Advected Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of advected species in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.8. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.9. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.10. Wet Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.11. Wet Oxidation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry stratospheric heterogeneous chemistry
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview stratospheric heterogeneous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
"""
Explanation: 14.2. Gas Phase Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Gas phase species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
"""
Explanation: 14.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.5. Sedimentation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sedimentation included in the stratospheric heterogeneous chemistry scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the stratospheric heterogeneous chemistry scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview tropospheric heterogeneous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Gas Phase Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of gas phase species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
"""
Explanation: 15.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the tropospheric heterogeneous chemistry scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric photo chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 16.2. Number Of Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the photo-chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Photolysis scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.2. Environmental Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.)
End of explanation
"""
|
ML4DS/ML4all | P3.Python_datos/Old/Data_python_student.ipynb | mit | %matplotlib inline
# Needed to include the figures in this notebook, you can remove it
# to work with a normal script
import numpy as np
import csv
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
"""
Explanation: Lab of data analysis with python
Author: Jesús Fernández Bes
Jerónimo Arenas García ([email protected])
Jesús Cid Sueiro ([email protected])
Notebook version: 1.1 (Sep 20, 2017)
Changes: v.1.0 - First version.
v.1.1 - Compatibility with python 2 and python 3
Pending changes:
In this lab we will introduce some of the modules that we will use in the rest of the labs of the course.
The usual beginning of any python module is a list of import statements. In most of our files we will use the following modules:
numpy: The basic scientific computing library.
csv: Used for input/output in using comma separated values files, one of the standards formats in data management.
matplotlib: Used for plotting figures and graphs
sklearn: Scikit-learn is the machine learning library for python.
End of explanation
"""
my_array = np.array([[1, 2],[3, 4]])
print(my_array)
print(np.shape(my_array))
"""
Explanation: 1. NUMPY
The numpy module is useful for scientific computing in Python.
The main data structure in numpy is the n-dimensional array. You can define a numpy array from a list or a list of lists. Python will try to build it with the appropriate dimensions. You can check the dimensions of the array with shape().
End of explanation
"""
#<SOL>
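# One possible solution sketch (yours may differ):
my_array2 = np.array([[1, 2, 3], [4, 5, 6]])
print(my_array2)
print(np.shape(my_array2))   # -> (2, 3)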
#</SOL>
"""
Explanation: Define a new 2x3 array named my_array2 with [1, 2, 3] in the first row and [4, 5, 6] in the second.
Check the dimension of the array.
End of explanation
"""
my_array3 = my_array[:,1]
print(my_array3)
print(my_array[1,0:2])
"""
Explanation: There are a number of operations you can do with numpy arrays, similar to the ones you can do with matrices in Matlab. One of the most important is slicing. We saw it when we talked about lists; it consists of extracting a subarray from the array.
End of explanation
"""
#<SOL>
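# One possible solution sketch:
print(my_array3.shape)   # -> (2,): a one-dimensional array
print(my_array3.ndim)    # -> 1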
#</SOL>
"""
Explanation: One important thing to consider when you do slicing is the dimension of the output array. Check the shape of my_array3. Check also its dimension with the function ndim:
End of explanation
"""
my_array3 = my_array3[:,np.newaxis]
"""
Explanation: If you have computed it correctly you will see that my_array3 is one-dimensional. Sometimes this can be a problem when you are working with 2D matrices (and vectors can be considered as 2D matrices with one of the sizes equal to 1). To solve this, numpy provides the newaxis constant.
End of explanation
"""
#<SOL>
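# One possible solution sketch:
print(my_array3.shape)   # now (2, 1)
print(my_array3.ndim)    # now 2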
#</SOL>
"""
Explanation: Check again the shape and dimension of my_array3
End of explanation
"""
print(my_array[:,1])
print(my_array[:,1].shape)
print(my_array[:,1:2])
print(my_array[:,1:2].shape)
"""
Explanation: It is possible to extract a single row or column from a 2D numpy array so that the result is still 2D, without explicitly resorting to np.newaxis. Compare the outputs of the following print commands.
End of explanation
"""
print(my_array)
print(my_array2)
print(np.concatenate( (my_array, my_array2) , axis=1)) # columnwise concatenation
"""
Explanation: Another important array manipulation method is array concatenation or stacking. It is useful to always state explicitly in which direction we want to stack the arrays. In the following example we are stacking the arrays horizontally (column-wise, i.e. axis=1).
End of explanation
"""
#<SOL>
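# One possible solution sketch (slicing with ranges keeps each piece 2D):
print(np.concatenate((my_array[:, 0:1], my_array2[:, 1:2]), axis=1))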
#</SOL>
"""
Explanation: EXERCISE: Concatenate the first column of my_array and the second column of my_array2
End of explanation
"""
#<SOL>
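# One possible solution sketch:
print(np.ones((3, 2)))
print(np.zeros((2, 4)))
print(np.eye(3))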
#</SOL>
"""
Explanation: You can create numpy arrays in several ways, not only from lists. For example numpy provides a number of functions to create special types of matrices.
EXERCISE: Create 3 arrays using ones, zeros and eye. If you have any doubt about the parameters of these functions, have a look at their help with the function help().
End of explanation
"""
t = np.arange(0.0, 1.0, 0.05)
a1 = np.sin(2*np.pi*t)
a2 = np.sin(4*np.pi*t)
#s = sin(2*3.14159*t)
plt.figure()
ax1 = plt.subplot(211)
ax1.plot(t,a1)
plt.xlabel('t')
plt.ylabel('a_1(t)')
ax2 = plt.subplot(212)
ax2.plot(t,a2, 'r.')
plt.xlabel('t')
plt.ylabel('a_2(t)')
plt.show()
"""
Explanation: Finally numpy provides all the basic matrix operations: multiplications, dot products, ...
You can find information about them in the Numpy manual.
In addition to numpy we have a more advanced library for scientific computing, Scipy, that includes modules for linear algebra, signal processing, Fourier transform, ...
2. Matplotlib
One important step of data analysis is data visualization. In python the simplest plotting library is matplotlib, and its syntax is similar to Matlab's plotting library. In the next example we plot two sinusoids with different symbols.
End of explanation
"""
# Open up the csv file in to a Python object
csv_file_object = csv.reader(open('iris_data.csv', 'r'))
datalist = [] # Create a variable called 'data'.
for row in csv_file_object: # Run through each row in the csv file,
datalist.append(row) # adding each row to the data variable
data = np.array(datalist) # Then convert from a list to an array
# Be aware that each item is currently
# a string in this format
print(np.shape(data))
X = data[:,0:-1]
label = data[:,-1,np.newaxis]
print(X.shape)
print(label.shape)
"""
Explanation: 3. Classification example
One of the main machine learning problems is classification. In the following example, we will load and visualize a dataset that can be used in a classification problem.
The iris dataset is one of the most popular pattern recognition datasets. It consists of 150 instances with 4 features of iris flowers:
sepal length in cm
sepal width in cm
petal length in cm
petal width in cm
The objective is usually to distinguish three different classes of iris plant: Iris setosa, Iris versicolor, and Iris virginica.
3.1 Loading the data
We give you the data in .csv format. In each line of the csv file we have the 4 real-valued features of each instance and then a string defining the class of that instance: Iris-setosa, Iris-versicolor or Iris-virginica. There are 150 instances of flowers in the csv file.
Let's see how we can load the data into an array
End of explanation
"""
#<SOL>
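# One possible solution sketch; the class strings are assumed to appear exactly
# as described in the text (Iris-setosa, Iris-versicolor, Iris-virginica):
X2 = X[:, 0:2].astype(float)      # sepal length and sepal width
y_flat = label.ravel()
plt.figure()
for class_name, marker in [('Iris-setosa', 'r.'),
                           ('Iris-versicolor', 'g.'),
                           ('Iris-virginica', 'b.')]:
    X_c = X2[y_flat == class_name, :]
    plt.plot(X_c[:, 0], X_c[:, 1], marker, label=class_name)
plt.xlabel('sepal length (cm)')
plt.ylabel('sepal width (cm)')
plt.legend()
plt.show()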
#</SOL>
"""
Explanation: In the previous code we have saved the features in the matrix X and the class labels in the vector label. Both are 2D numpy arrays.
We are also printing the shapes of each variable (note that we can also use array_name.shape to get the shape, apart from the function shape()). Checking the shape of matrices is a convenient way to prevent mistakes in your code.
3.2 Visualizing the data
Extract the 2 first features of the data (sepal length and width) and plot the first versus the second in a figure, use a different color for the data corresponding to different classes.
First of all you probably want to split the data according to each class label.
End of explanation
"""
#<SOL>
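# One possible solution sketch, following the same pattern used for the iris data
# (the filename communities.csv is taken from the text):
csv_file_object = csv.reader(open('communities.csv', 'r'))
datalist = []
for row in csv_file_object:
    datalist.append(row)
data_com = np.array(datalist)    # data_com is a name chosen here, not prescribed
print(data_com.shape)            # expected: (1994, 128)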
#</SOL>
"""
Explanation: According to this plot, which classes seem more difficult to distinguish?
4. Regression example
Now that we know how to load some data and visualize them, we will try to solve a simple regression task.
Our objective in this example is to predict the crime rates in different areas of the US using some socio-demographic data.
This dataset has 127 socioeconomic variables of different nature: categorical, integer, real, and for some of them there are also missing data (check wikipedia). This is usually a problem when training machine learning models, but we will ignore that problem and take only a small number of variables that we think can be useful for regression and which have no missing values.
population: population for community
householdsize: mean people per household
medIncome: median household income
The target in this regression problem is another real value: the total number of violent crimes per 100K population.
4.1 Loading the data
First of all, load the data from file communities.csv in a new array. This array should have 1994 rows (instances) and 128 columns.
End of explanation
"""
#<SOL>
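# One possible solution sketch (continues from the data_com array defined in the
# earlier sketch; column indices follow the text):
X_com = data_com[:, [5, 6, 17]].astype(float)
y_com = data_com[:, -1:].astype(float)   # slicing keeps the 2D shape
print(X_com.shape)   # expected: (1994, 3)
print(y_com.shape)   # expected: (1994, 1)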
#</SOL>
"""
Explanation: Take the columns (5,6,17) of the data and save them in a matrix X_com. This will be our input data. Convert this array into a float array. The shape should be (1994,3)
Get the last column of the data and save it in an array called y_com. Convert this matrix into a float array.
Check that the shape is (1994,1).
End of explanation
"""
#<SOL>
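# One possible solution sketch:
plt.figure()
for k in range(X_com.shape[1]):
    plt.subplot(1, 3, k + 1)
    plt.plot(X_com[:, k], y_com, '.')
    plt.xlabel('variable ' + str(k))
    plt.ylabel('violent crimes per 100K population')
plt.show()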
#</SOL>
"""
Explanation: Plot each variable in X_com versus y_com to have a first (partial) view of the data.
End of explanation
"""
#<SOL>
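# One possible solution sketch (random_state is an arbitrary choice made here,
# only to keep the split reproducible):
X_train, X_test, y_train, y_test = train_test_split(
    X_com, y_com, test_size=0.4, random_state=0)
print(X_train.shape)
print(X_test.shape)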
#</SOL>
"""
Explanation: 4.2 Train/Test splitting
Now, we are about to start doing machine learning. But, first of all, we have to separate our data in train and test partitions.
The train data will be used to adjust the parameters (train) of our model.
The test data will be used to evaluate our model.
Use sklearn.cross_validation.train_test_split to split the data in train (60%) and test (40%). Save the results in variables named X_train, X_test, y_train, y_test.
Important note
In real applications, you would have no access to any targets for the test data. However, for illustrative purposes, when evaluating machine learning algorithms it is common to set aside a test partition, including the corresponding labels, so that you can use these targets to assess the performance of the method. When proceeding in this way, the test labels should never be used during the design; they may only be used as a final assessment step once the classifier or regression model has been fully adjusted.
End of explanation
"""
#<SOL>
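# One possible solution sketch: fit the scaler on the training data only, then
# apply the same transformation to both partitions.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)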
#</SOL>
"""
Explanation: 4.3 Normalization
Most machine learning algorithms require that the data are standardized (mean=0, standard deviation=1). Scikit-learn provides a tool to do that in the object sklearn.preprocessing.StandardScaler (but you can also try to program it yourself; it is easier than in MATLAB!!)
End of explanation
"""
#<SOL>
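# One possible solution sketch (the variable names are a choice made here):
knn1 = KNeighborsRegressor(n_neighbors=1)
knn1.fit(X_train, y_train)
knn7 = KNeighborsRegressor(n_neighbors=7)
knn7.fit(X_train, y_train)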
#</SOL>
"""
Explanation: 4.4 Training
We will apply two different K-NN regressors for this example. One with K (n_neighbors) = 1 and the other with K=7.
Read the API and this example to understand how to fit the model.
End of explanation
"""
#<SOL>
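# One possible solution sketch:
y_pred_1 = knn1.predict(X_test)
y_pred_7 = knn7.predict(X_test)
mse_1 = np.mean((y_pred_1 - y_test) ** 2)
mse_7 = np.mean((y_pred_7 - y_test) ** 2)
print('MSE with K=1: {0}'.format(mse_1))
print('MSE with K=7: {0}'.format(mse_7))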
#</SOL>
"""
Explanation: 4.5 Prediction and evaluation
Now use the two models you have trained to predict the test output y_test. To evaluate it measure the MSE.
The formula of MSE is
$$\text{MSE}=\frac{1}{K}\sum_{k=1}^{K}({\hat{y}}-y)^2$$
End of explanation
"""
#<SOL>
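# The snippet referred to in the text is not reproduced here; a minimal sketch
# using numpy (the output filename is an arbitrary choice) could be:
y_pred = knn1.predict(X_test)
np.savetxt('knn1_test_predictions.csv', y_pred, delimiter=',')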
#</SOL>
"""
Explanation: 4.6 Saving the results
Finally we will save all our predictions for the model with K=1 in a csv file. To do so you can use the following code Snippet, where y_pred are the predicted output values for test.
End of explanation
"""
|
rashikaranpuria/Machine-Learning-Specialization | Clustering_&_Retrieval/Week2/Assignment2/.ipynb_checkpoints/1_nearest-neighbors-lsh-implementation_blank-checkpoint.ipynb | mit | import numpy as np
import graphlab
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import norm
from sklearn.metrics.pairwise import pairwise_distances
import time
from copy import copy
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: Locality Sensitive Hashing
Locality Sensitive Hashing (LSH) provides a fast, efficient approximate nearest neighbor search. The algorithm scales well with respect to the number of data points as well as dimensions.
In this assignment, you will
* Implement the LSH algorithm for approximate nearest neighbor search
* Examine the accuracy for different documents by comparing against brute force search, and also contrast runtimes
* Explore the role of the algorithm’s tuning parameters in the accuracy of the method
Note to Amazon EC2 users: To conserve memory, make sure to stop all the other notebooks before running this notebook.
Import necessary packages
End of explanation
"""
# !conda upgrade -y scipy
"""
Explanation: Upgrading to Scipy 0.16.0 or later. This assignment requires SciPy 0.16.0 or later. To upgrade, uncomment and run the following cell:
End of explanation
"""
wiki = graphlab.SFrame('people_wiki.gl/')
"""
Explanation: Load in the Wikipedia dataset
End of explanation
"""
wiki = wiki.add_row_number()
wiki
"""
Explanation: For this assignment, let us assign a unique ID to each document.
End of explanation
"""
wiki['tf_idf'] = graphlab.text_analytics.tf_idf(wiki['text'])
wiki
"""
Explanation: Extract TF-IDF matrix
We first use GraphLab Create to compute a TF-IDF representation for each document.
End of explanation
"""
def sframe_to_scipy(column):
"""
Convert a dict-typed SArray into a SciPy sparse matrix.
Returns
-------
mat : a SciPy sparse matrix where mat[i, j] is the value of word j for document i.
mapping : a dictionary where mapping[j] is the word whose values are in column j.
"""
# Create triples of (row_id, feature_id, count).
x = graphlab.SFrame({'X1':column})
# 1. Add a row number.
x = x.add_row_number()
# 2. Stack will transform x to have a row for each unique (row, key) pair.
x = x.stack('X1', ['feature', 'value'])
# Map words into integers using a OneHotEncoder feature transformation.
f = graphlab.feature_engineering.OneHotEncoder(features=['feature'])
# We first fit the transformer using the above data.
f.fit(x)
# The transform method will add a new column that is the transformed version
# of the 'word' column.
x = f.transform(x)
# Get the feature mapping.
mapping = f['feature_encoding']
# Get the actual word id.
x['feature_id'] = x['encoded_features'].dict_keys().apply(lambda x: x[0])
# Create numpy arrays that contain the data for the sparse matrix.
i = np.array(x['id'])
j = np.array(x['feature_id'])
v = np.array(x['value'])
width = x['id'].max() + 1
height = x['feature_id'].max() + 1
# Create a sparse matrix.
mat = csr_matrix((v, (i, j)), shape=(width, height))
return mat, mapping
"""
Explanation: For the remainder of the assignment, we will use sparse matrices. Sparse matrices are [matrices](https://en.wikipedia.org/wiki/Matrix_(mathematics)) that have a small number of nonzero entries.
We first convert the TF-IDF column (in dictionary format) into the SciPy sparse matrix format.
End of explanation
"""
start=time.time()
corpus, mapping = sframe_to_scipy(wiki['tf_idf'])
end=time.time()
print end-start
"""
Explanation: The conversion should take a few minutes to complete.
End of explanation
"""
assert corpus.shape == (59071, 547979)
print 'Check passed correctly!'
"""
Explanation: Checkpoint: The following code block should print 'Check passed correctly!', indicating that your matrix contains TF-IDF values for 59071 documents and 547979 unique words. Otherwise, the assert will raise an AssertionError.
End of explanation
"""
def generate_random_vectors(num_vector, dim):
return np.random.randn(dim, num_vector)
"""
Explanation: Train an LSH model
LSH performs an efficient neighbor search by randomly partitioning all reference data points into different bins. Today we will build a popular variant of LSH known as random binary projection, which approximates cosine distance. There are other variants we could use for other choices of distance metrics.
The first step is to generate a collection of random vectors from the standard Gaussian distribution.
End of explanation
"""
# Generate 3 random vectors of dimension 5, arranged into a single 5 x 3 matrix.
np.random.seed(0) # set seed=0 for consistent results
generate_random_vectors(num_vector=3, dim=5)
"""
Explanation: To visualize these Gaussian random vectors, let's look at an example in low-dimensions. Below, we generate 3 random vectors each of dimension 5.
End of explanation
"""
# Generate 16 random vectors of dimension 547979
np.random.seed(0)
random_vectors = generate_random_vectors(num_vector=16, dim=547979)
random_vectors.shape
"""
Explanation: We now generate random vectors of the same dimensionality as our vocabulary size (547979). Each vector can be used to compute one bit in the bin encoding. We generate 16 vectors, leading to a 16-bit encoding of the bin index for each document.
End of explanation
"""
doc = corpus[0, :] # vector of tf-idf values for document 0
doc.dot(random_vectors[:, 0]) >= 0 # True if positive sign; False if negative sign
"""
Explanation: Next, we partition data points into bins. Instead of using explicit loops, we'd like to utilize matrix operations for greater efficiency. Let's walk through the construction step by step.
We'd like to decide which bin document 0 should go into. Since 16 random vectors were generated in the previous cell, we have 16 bits to represent the bin index. The first bit is given by the sign of the dot product between the first random vector and the document's TF-IDF vector.
End of explanation
"""
doc.dot(random_vectors[:, 1]) >= 0 # True if positive sign; False if negative sign
"""
Explanation: Similarly, the second bit is computed as the sign of the dot product between the second random vector and the document vector.
End of explanation
"""
doc.dot(random_vectors) >= 0 # should return an array of 16 True/False bits
np.array(doc.dot(random_vectors) >= 0, dtype=int) # display index bits in 0/1's
"""
Explanation: We can compute all of the bin index bits at once as follows. Note the absence of the explicit for loop over the 16 vectors. Matrix operations let us batch dot-product computation in a highly efficient manner, unlike the for loop construction. Given the relative inefficiency of loops in Python, the advantage of matrix operations is even greater.
End of explanation
"""
corpus[0:2].dot(random_vectors) >= 0 # compute bit indices of first two documents
corpus.dot(random_vectors) >= 0 # compute bit indices of ALL documents
"""
Explanation: All documents that obtain exactly this vector will be assigned to the same bin. We'd like to repeat the identical operation on all documents in the Wikipedia dataset and compute the corresponding bin indices. Again, we use matrix operations so that no explicit loop is needed.
End of explanation
"""
doc = corpus[0, :] # first document
index_bits = (doc.dot(random_vectors) >= 0)
powers_of_two = (1 << np.arange(15, -1, -1))
print index_bits
print powers_of_two
print index_bits.dot(powers_of_two)
"""
Explanation: We're almost done! To make it convenient to refer to individual bins, we convert each binary bin index into a single integer:
Bin index integer
[0,0,0,0,0,0,0,0,0,0,0,0] => 0
[0,0,0,0,0,0,0,0,0,0,0,1] => 1
[0,0,0,0,0,0,0,0,0,0,1,0] => 2
[0,0,0,0,0,0,0,0,0,0,1,1] => 3
...
[1,1,1,1,1,1,1,1,1,1,0,0] => 65532
[1,1,1,1,1,1,1,1,1,1,0,1] => 65533
[1,1,1,1,1,1,1,1,1,1,1,0] => 65534
[1,1,1,1,1,1,1,1,1,1,1,1] => 65535 (= 2^16-1)
By the rules of binary number representation, we just need to compute the dot product between the document vector and the vector consisting of powers of 2:
End of explanation
"""
index_bits = corpus.dot(random_vectors) >= 0
index_bits.dot(powers_of_two)
"""
Explanation: Since it's the dot product again, we batch it with a matrix operation:
End of explanation
"""
def train_lsh(data, num_vector=16, seed=None):
dim = data.shape[1]
if seed is not None:
np.random.seed(seed)
random_vectors = generate_random_vectors(num_vector, dim)
powers_of_two = 1 << np.arange(num_vector-1, -1, -1)
table = {}
# Partition data points into bins
bin_index_bits = (data.dot(random_vectors) >= 0)
# Encode bin index bits into integers
bin_indices = bin_index_bits.dot(powers_of_two)
# Update `table` so that `table[i]` is the list of document ids with bin index equal to i.
for data_index, bin_index in enumerate(bin_indices):
if bin_index not in table:
# If no list yet exists for this bin, assign the bin an empty list.
table[bin_index] = ... # YOUR CODE HERE
# Fetch the list of document ids associated with the bin and add the document id to the end.
... # YOUR CODE HERE
model = {'data': data,
'bin_index_bits': bin_index_bits,
'bin_indices': bin_indices,
'table': table,
'random_vectors': random_vectors,
'num_vector': num_vector}
return model
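# A possible completion of the two omitted lines inside the loop above (one
# sketch, not necessarily the graded solution):
#     table[bin_index] = []                  # start an empty list for a new bin
#     table[bin_index].append(data_index)    # record this document id in its bin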
"""
Explanation: This array gives us the integer index of the bins for all documents.
Now we are ready to complete the following function. Given the integer bin indices for the documents, you should compile a list of document IDs that belong to each bin. Since a list is to be maintained for each unique bin index, a dictionary of lists is used.
Compute the integer bin indices. This step is already completed.
For each document in the dataset, do the following:
Get the integer bin index for the document.
Fetch the list of document ids associated with the bin; if no list yet exists for this bin, assign the bin an empty list.
Add the document id to the end of the list.
End of explanation
"""
model = train_lsh(corpus, num_vector=16, seed=143)
table = model['table']
if 0 in table and table[0] == [39583] and \
143 in table and table[143] == [19693, 28277, 29776, 30399]:
print 'Passed!'
else:
print 'Check your code.'
"""
Explanation: Checkpoint.
End of explanation
"""
wiki[wiki['name'] == 'Barack Obama']
"""
Explanation: Note. We will be using the model trained here in the following sections, unless otherwise indicated.
Inspect bins
Let us look at some documents and see which bins they fall into.
End of explanation
"""
wiki[wiki['name'] == 'Joe Biden']
"""
Explanation: Quiz Question. What is the document id of Barack Obama's article?
Quiz Question. Which bin contains Barack Obama's article? Enter its integer index.
Recall from the previous assignment that Joe Biden was a close neighbor of Barack Obama.
End of explanation
"""
wiki[wiki['name']=='Wynn Normington Hugh-Jones']
print np.array(model['bin_index_bits'][22745], dtype=int) # list of 0/1's
print model['bin_indices'][22745] # integer format
model['bin_index_bits'][35817] == model['bin_index_bits'][22745]
"""
Explanation: Quiz Question. Examine the bit representations of the bins containing Barack Obama and Joe Biden. In how many places do they agree?
16 out of 16 places (Barack Obama and Joe Biden fall into the same bin)
14 out of 16 places
12 out of 16 places
10 out of 16 places
8 out of 16 places
Compare the result with a former British diplomat, whose bin representation agrees with Obama's in only 8 out of 16 places.
End of explanation
"""
model['table'][model['bin_indices'][35817]]
"""
Explanation: How about the documents in the same bin as Barack Obama? Are they necessarily more similar to Obama than Biden is? Let's look at which documents are in the same bin as the Barack Obama article.
End of explanation
"""
doc_ids = list(model['table'][model['bin_indices'][35817]])
doc_ids.remove(35817) # display documents other than Obama
docs = wiki.filter_by(values=doc_ids, column_name='id') # filter by id column
docs
"""
Explanation: There are four other documents that belong to the same bin. Which documents are they?
End of explanation
"""
def cosine_distance(x, y):
xy = x.dot(y.T)
dist = xy/(norm(x)*norm(y))
return 1-dist[0,0]
obama_tf_idf = corpus[35817,:]
biden_tf_idf = corpus[24478,:]
print '================= Cosine distance from Barack Obama'
print 'Barack Obama - {0:24s}: {1:f}'.format('Joe Biden',
cosine_distance(obama_tf_idf, biden_tf_idf))
for doc_id in doc_ids:
doc_tf_idf = corpus[doc_id,:]
print 'Barack Obama - {0:24s}: {1:f}'.format(wiki[doc_id]['name'],
cosine_distance(obama_tf_idf, doc_tf_idf))
"""
Explanation: It turns out that Joe Biden is much closer to Barack Obama than any of the four documents, even though Biden's bin representation differs from Obama's by 2 bits.
End of explanation
"""
from itertools import combinations
num_vector = 16
search_radius = 3
for diff in combinations(range(num_vector), search_radius):
print diff
"""
Explanation: Moral of the story. Similar data points will in general tend to fall into nearby bins, but that's all we can say about LSH. In a high-dimensional space such as text features, we often get unlucky with our selection of only a few random vectors such that dissimilar data points go into the same bin while similar data points fall into different bins. Given a query document, we must consider all documents in the nearby bins and sort them according to their actual distances from the query.
Query the LSH model
Let us first implement the logic for searching nearby neighbors, which goes like this:
1. Let L be the bit representation of the bin that contains the query document.
2. Consider all documents in bin L.
3. Consider documents in the bins whose bit representation differs from L by 1 bit.
4. Consider documents in the bins whose bit representation differs from L by 2 bits.
...
To obtain candidate bins that differ from the query bin by some number of bits, we use itertools.combinations, which produces all possible subsets of a given list. See this documentation for details.
1. Decide on the search radius r. This will determine the number of different bits between the two vectors.
2. For each subset (n_1, n_2, ..., n_r) of the list [0, 1, 2, ..., num_vector-1], do the following:
* Flip the bits (n_1, n_2, ..., n_r) of the query bin to produce a new bit vector.
* Fetch the list of documents belonging to the bin indexed by the new bit vector.
* Add those documents to the candidate set.
Each line of output from the following cell is a 3-tuple indicating where the candidate bin would differ from the query bin. For instance,
(0, 1, 3)
indicates that the candidate bin differs from the query bin in the first, second, and fourth bits.
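As a concrete illustration of the flip-and-index step, the minimal sketch below takes one such tuple, flips those bits on a copy of an example query bin, and converts the result back into an integer bin index (names reused from the cells above and below):
```python
from copy import copy

different_bits = (0, 1, 3)                             # one tuple produced by combinations()
alternate_bits = copy(model['bin_index_bits'][35817])  # copy of an example query bin
for i in different_bits:
    alternate_bits[i] = 1 if alternate_bits[i] == 0 else 0   # flip bit i
powers_of_two = 1 << np.arange(15, -1, -1)             # num_vector = 16
print alternate_bits.dot(powers_of_two)                # integer index of the candidate bin
```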
End of explanation
"""
def search_nearby_bins(query_bin_bits, table, search_radius=2, initial_candidates=set()):
"""
For a given query vector and trained LSH model, return all candidate neighbors for
the query among all bins within the given search radius.
Example usage
-------------
>>> model = train_lsh(corpus, num_vector=16, seed=143)
>>> q = model['bin_index_bits'][0] # vector for the first document
>>> candidates = search_nearby_bins(q, model['table'])
"""
num_vector = len(query_bin_bits)
powers_of_two = 1 << np.arange(num_vector-1, -1, -1)
# Allow the user to provide an initial set of candidates.
candidate_set = copy(initial_candidates)
for different_bits in combinations(range(num_vector), search_radius):
# Flip the bits (n_1,n_2,...,n_r) of the query bin to produce a new bit vector.
## Hint: you can iterate over a tuple like a list
alternate_bits = copy(query_bin_bits)
for i in different_bits:
            alternate_bits[i] = 1 if alternate_bits[i] == 0 else 0  # flip bit i of the copied query bin
# Convert the new bit vector to an integer index
nearby_bin = alternate_bits.dot(powers_of_two)
# Fetch the list of documents belonging to the bin indexed by the new bit vector.
# Then add those documents to candidate_set
# Make sure that the bin exists in the table!
# Hint: update() method for sets lets you add an entire list to the set
if nearby_bin in table:
            candidate_set.update(table[nearby_bin])  # add all documents in this bin to the candidate set
return candidate_set
"""
Explanation: With this output in mind, implement the logic for nearby bin search:
End of explanation
"""
obama_bin_index = model['bin_index_bits'][35817] # bin index of Barack Obama
candidate_set = search_nearby_bins(obama_bin_index, model['table'], search_radius=0)
if candidate_set == set([35817, 21426, 53937, 39426, 50261]):
print 'Passed test'
else:
print 'Check your code'
print 'List of documents in the same bin as Obama: 35817, 21426, 53937, 39426, 50261'
"""
Explanation: Checkpoint. Running the function with search_radius=0 should yield the list of documents belonging to the same bin as the query.
End of explanation
"""
candidate_set = search_nearby_bins(obama_bin_index, model['table'], search_radius=1, initial_candidates=candidate_set)
if candidate_set == set([39426, 38155, 38412, 28444, 9757, 41631, 39207, 59050, 47773, 53937, 21426, 34547,
23229, 55615, 39877, 27404, 33996, 21715, 50261, 21975, 33243, 58723, 35817, 45676,
19699, 2804, 20347]):
print 'Passed test'
else:
print 'Check your code'
"""
Explanation: Checkpoint. Running the function with search_radius=1 adds more documents to the candidate set.
End of explanation
"""
def query(vec, model, k, max_search_radius):
data = model['data']
table = model['table']
random_vectors = model['random_vectors']
num_vector = random_vectors.shape[1]
# Compute bin index for the query vector, in bit representation.
bin_index_bits = (vec.dot(random_vectors) >= 0).flatten()
# Search nearby bins and collect candidates
candidate_set = set()
for search_radius in xrange(max_search_radius+1):
candidate_set = search_nearby_bins(bin_index_bits, table, search_radius, initial_candidates=candidate_set)
# Sort candidates by their true distances from the query
nearest_neighbors = graphlab.SFrame({'id':candidate_set})
candidates = data[np.array(list(candidate_set)),:]
nearest_neighbors['distance'] = pairwise_distances(candidates, vec, metric='cosine').flatten()
return nearest_neighbors.topk('distance', k, reverse=True), len(candidate_set)
"""
Explanation: Note. Don't be surprised if few of the candidates look similar to Obama. This is why we add as many candidates as our computational budget allows and sort them by their distance to the query.
Now we have a function that can return all the candidates from neighboring bins. Next we write a function to collect all candidates and compute their true distance to the query.
End of explanation
"""
query(corpus[35817,:], model, k=10, max_search_radius=3)
"""
Explanation: Let's try it out with Obama:
End of explanation
"""
query(corpus[35817,:], model, k=10, max_search_radius=3)[0].join(wiki[['id', 'name']], on='id').sort('distance')
"""
Explanation: To identify the documents, it's helpful to join this table with the Wikipedia table:
End of explanation
"""
wiki[wiki['name']=='Barack Obama']
num_candidates_history = []
query_time_history = []
max_distance_from_query_history = []
min_distance_from_query_history = []
average_distance_from_query_history = []
for max_search_radius in xrange(17):
start=time.time()
result, num_candidates = query(corpus[35817,:], model, k=10,
max_search_radius=max_search_radius)
end=time.time()
query_time = end-start
print 'Radius:', max_search_radius
print result.join(wiki[['id', 'name']], on='id').sort('distance')
average_distance_from_query = result['distance'][1:].mean()
max_distance_from_query = result['distance'][1:].max()
min_distance_from_query = result['distance'][1:].min()
num_candidates_history.append(num_candidates)
query_time_history.append(query_time)
average_distance_from_query_history.append(average_distance_from_query)
max_distance_from_query_history.append(max_distance_from_query)
min_distance_from_query_history.append(min_distance_from_query)
"""
Explanation: We have shown that we have a working LSH implementation!
Experimenting with your LSH implementation
In the following sections we have implemented a few experiments so that you can gain intuition for how your LSH implementation behaves in different situations. This will help you understand the effect of searching nearby bins and the performance of LSH versus computing nearest neighbors using a brute force search.
Effect of nearby bin search
How does nearby bin search affect the outcome of LSH? There are three variables that are affected by the search radius:
* Number of candidate documents considered
* Query time
* Distance of approximate neighbors from the query
Let us run LSH multiple times, each with different radii for nearby bin search. We will measure the three variables as discussed above.
End of explanation
"""
plt.figure(figsize=(7,4.5))
plt.plot(num_candidates_history, linewidth=4)
plt.xlabel('Search radius')
plt.ylabel('# of documents searched')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
plt.figure(figsize=(7,4.5))
plt.plot(query_time_history, linewidth=4)
plt.xlabel('Search radius')
plt.ylabel('Query time (seconds)')
plt.rcParams.update({'font.size':16})
plt.tight_layout()
plt.figure(figsize=(7,4.5))
plt.plot(average_distance_from_query_history, linewidth=4, label='Average of 10 neighbors')
plt.plot(max_distance_from_query_history, linewidth=4, label='Farthest of 10 neighbors')
plt.plot(min_distance_from_query_history, linewidth=4, label='Closest of 10 neighbors')
plt.xlabel('Search radius')
plt.ylabel('Cosine distance of neighbors')
plt.legend(loc='best', prop={'size':15})
plt.rcParams.update({'font.size':16})
plt.tight_layout()
"""
Explanation: Notice that the top 10 query results become more relevant as the search radius grows. Let's plot the three variables:
End of explanation
"""
def brute_force_query(vec, data, k):
num_data_points = data.shape[0]
# Compute distances for ALL data points in training set
nearest_neighbors = graphlab.SFrame({'id':range(num_data_points)})
nearest_neighbors['distance'] = pairwise_distances(data, vec, metric='cosine').flatten()
return nearest_neighbors.topk('distance', k, reverse=True)
"""
Explanation: Some observations:
* As we increase the search radius, we find more neighbors that are a smaller distance away.
* With increased search radius comes a greater number of documents that have to be searched. Query time is higher as a consequence.
* With sufficiently high search radius, the results of LSH begin to resemble the results of brute-force search.
Quiz Question. What was the smallest search radius that yielded the correct nearest neighbor, namely Joe Biden?
Quiz Question. Suppose our goal was to produce 10 approximate nearest neighbors whose average distance from the query document is within 0.01 of the average for the true 10 nearest neighbors. For Barack Obama, the average distance to the true 10 nearest neighbors is about 0.77. What was the smallest search radius for Barack Obama that produced an average distance of 0.78 or better?
Quality metrics for neighbors
The above analysis is limited by the fact that it was run with a single query, namely Barack Obama. We should repeat the analysis for the entire dataset. Iterating over all documents would take a long time, so let us randomly choose 10 documents for our analysis.
For each document, we first compute the true 25 nearest neighbors, and then run LSH multiple times. We look at two metrics:
Precision@10: How many of the 10 neighbors given by LSH are among the true 25 nearest neighbors?
Average cosine distance of the neighbors from the query
Then we run LSH multiple times with different search radii.
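For reference, Precision@10 here is simply the overlap between the 10 LSH neighbors and the 25 true neighbors, divided by 10. A minimal sketch using the variable names from the cell below:
```python
# Precision@10 for one query: fraction of the 10 LSH-returned neighbors that
# also appear among the 25 true nearest neighbors from brute-force search.
lsh_ids = set(result['id'])        # ids returned by query()
true_ids = ground_truth            # set of 25 ids from brute_force_query()
precision_at_10 = len(lsh_ids & true_ids) / 10.0
```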
End of explanation
"""
max_radius = 17
precision = {i:[] for i in xrange(max_radius)}
average_distance = {i:[] for i in xrange(max_radius)}
query_time = {i:[] for i in xrange(max_radius)}
np.random.seed(0)
num_queries = 10
for i, ix in enumerate(np.random.choice(corpus.shape[0], num_queries, replace=False)):
print('%s / %s' % (i, num_queries))
ground_truth = set(brute_force_query(corpus[ix,:], corpus, k=25)['id'])
# Get the set of 25 true nearest neighbors
for r in xrange(1,max_radius):
start = time.time()
result, num_candidates = query(corpus[ix,:], model, k=10, max_search_radius=r)
end = time.time()
query_time[r].append(end-start)
# precision = (# of neighbors both in result and ground_truth)/10.0
precision[r].append(len(set(result['id']) & ground_truth)/10.0)
average_distance[r].append(result['distance'][1:].mean())
plt.figure(figsize=(7,4.5))
plt.plot(range(1,17), [np.mean(average_distance[i]) for i in xrange(1,17)], linewidth=4, label='Average over 10 neighbors')
plt.xlabel('Search radius')
plt.ylabel('Cosine distance')
plt.legend(loc='best', prop={'size':15})
plt.rcParams.update({'font.size':16})
plt.tight_layout()
plt.figure(figsize=(7,4.5))
plt.plot(range(1,17), [np.mean(precision[i]) for i in xrange(1,17)], linewidth=4, label='Precision@10')
plt.xlabel('Search radius')
plt.ylabel('Precision')
plt.legend(loc='best', prop={'size':15})
plt.rcParams.update({'font.size':16})
plt.tight_layout()
plt.figure(figsize=(7,4.5))
plt.plot(range(1,17), [np.mean(query_time[i]) for i in xrange(1,17)], linewidth=4, label='Query time')
plt.xlabel('Search radius')
plt.ylabel('Query time (seconds)')
plt.legend(loc='best', prop={'size':15})
plt.rcParams.update({'font.size':16})
plt.tight_layout()
"""
Explanation: The following cell will run LSH with multiple search radii and compute the quality metrics for each run. Allow a few minutes to complete.
End of explanation
"""
precision = {i:[] for i in xrange(5,20)}
average_distance = {i:[] for i in xrange(5,20)}
query_time = {i:[] for i in xrange(5,20)}
num_candidates_history = {i:[] for i in xrange(5,20)}
ground_truth = {}
np.random.seed(0)
num_queries = 10
docs = np.random.choice(corpus.shape[0], num_queries, replace=False)
for i, ix in enumerate(docs):
ground_truth[ix] = set(brute_force_query(corpus[ix,:], corpus, k=25)['id'])
# Get the set of 25 true nearest neighbors
for num_vector in xrange(5,20):
print('num_vector = %s' % (num_vector))
model = train_lsh(corpus, num_vector, seed=143)
for i, ix in enumerate(docs):
start = time.time()
result, num_candidates = query(corpus[ix,:], model, k=10, max_search_radius=3)
end = time.time()
query_time[num_vector].append(end-start)
precision[num_vector].append(len(set(result['id']) & ground_truth[ix])/10.0)
average_distance[num_vector].append(result['distance'][1:].mean())
num_candidates_history[num_vector].append(num_candidates)
plt.figure(figsize=(7,4.5))
plt.plot(range(5,20), [np.mean(average_distance[i]) for i in xrange(5,20)], linewidth=4, label='Average over 10 neighbors')
plt.xlabel('# of random vectors')
plt.ylabel('Cosine distance')
plt.legend(loc='best', prop={'size':15})
plt.rcParams.update({'font.size':16})
plt.tight_layout()
plt.figure(figsize=(7,4.5))
plt.plot(range(5,20), [np.mean(precision[i]) for i in xrange(5,20)], linewidth=4, label='Precision@10')
plt.xlabel('# of random vectors')
plt.ylabel('Precision')
plt.legend(loc='best', prop={'size':15})
plt.rcParams.update({'font.size':16})
plt.tight_layout()
plt.figure(figsize=(7,4.5))
plt.plot(range(5,20), [np.mean(query_time[i]) for i in xrange(5,20)], linewidth=4, label='Query time (seconds)')
plt.xlabel('# of random vectors')
plt.ylabel('Query time (seconds)')
plt.legend(loc='best', prop={'size':15})
plt.rcParams.update({'font.size':16})
plt.tight_layout()
plt.figure(figsize=(7,4.5))
plt.plot(range(5,20), [np.mean(num_candidates_history[i]) for i in xrange(5,20)], linewidth=4,
label='# of documents searched')
plt.xlabel('# of random vectors')
plt.ylabel('# of documents searched')
plt.legend(loc='best', prop={'size':15})
plt.rcParams.update({'font.size':16})
plt.tight_layout()
"""
Explanation: The observations for Barack Obama generalize to the entire dataset.
Effect of number of random vectors
Let us now turn our focus to the remaining parameter: the number of random vectors. We run LSH with different numbers of random vectors, ranging from 5 to 20. We fix the search radius to 3.
Allow a few minutes for the following cell to complete.
End of explanation
"""
|
barjacks/pythonrecherche | 02 jupyter notebook, intro python I/02 Jupyter Notebook & Python Intro.ipynb | mit | #dsfdskjfbskjdfbdkjbfkjdbf
#asdasd
"""
Explanation: Jupyter Notebook & Python Intro
First we use the command line to navigate to the folder where we want to save the Jupyter notebook. Then we activate our virtual environment and launch the notebook with "jupyter notebook". Jupyter Notebook is a working environment that is very easy for coding beginners to use, because individual pieces of code can be run separately.
There are two cell formats: code cells and so-called Markdown. The latter is a text format that attaches as little formatting information to the text as possible, unlike Word, for example. When you develop large notebooks, it is very helpful to work with it. For example
Titel
Titel
Titel
Titel
Titel
End of explanation
"""
#With a hash sign in front of a line we can comment our code; that is very important, too.
#Always, really always comment your own code. Especially in the beginning.
print("hello world")
#The print command simply prints everything out. Not terribly exciting on its own.
#But it is very useful later on, especially when it comes to finding errors in your own code.
#With the input command you can interact with the user.
input('wie alt bis Du?')
"""
Explanation: sad
hello
Or bullet lists and bold text. All of that works with Markdown. You can even build tables or set hyperlinks, for example to this Markdown cheatsheet. Here are more very practical formats. Usually, though, we don't use Jupyter notebooks for writing text but for coding. Let's get started.
Print and input
Data types
Operations
Variables and assignments
If, elif, else
Lists
Dictionaries
Tuples
Simple functions: len, sort, sorted
For Loop
Python
Print and input
End of explanation
"""
#Strings
'Hallo wie "geht es Dir"'
"12345"
124
str(124)
#Integer
type(567)
type(int('1234'))
#Floats
4.542323
float(12)
int(4.64)
#Dates, which are actually just strings
'15-11-2019'
"""
Explanation: Data types
End of explanation
"""
print('Hallo' + ' '+ 'wie' + 'geht' + 'es')
print('Hallo','wie','geht','es')
#All the other common ones:
#minus -
#times *
#divided by /
#Special: modulo %, divide and keep the remainder
22 % 5
2
"""
Explanation: Operations
End of explanation
"""
#Greater than and less than:
#< >
#Equal == (important: double equals sign)
#Because a single = just assigns a variable
'Schweiz' == 'Schweiz'
Schweiz = 'reich'
Schweiz
Schweiz == 'reich'
reich = 'arm'
1 = 'reich'
"5schweiz"
1 = 6
a = 34
a = b
a = 'b'
a == 'b'
a
"""
Explanation: Variables, comparisons and assignments
End of explanation
"""
elem = int(input('Wie alt bist Du?'))
elem
if elem < 0:
print('Das ist unmöglich')
else:
print('Du bist aber alt')
elem = int(input('Wie alt bist Du?'))
if elem < 0:
print('Das ist unmöglich')
elif elem < 25:
print('Du bist aber jung')
else:
print('Du bist aber alt')
"""
Explanation: if - else - (elif)
End of explanation
"""
#Square brackets
[1,"hallo",3,4,5.23,6,7]
lst = [1,2,3,4,5,6,7]
lst
#Single elements
lst[0]
#Whole slices
lst[:4]
#More complex slices
lst[::3]
lst
#Append, Pop, etc.
saved_item = lst.pop()
lst
lst.append(saved_item)
list
#Careful with the command list, because it turns things into a list. Even strings:
list('hallo wie geht')
range(0,10)
#The most elegant way to write a list. And very importantly,
#the computer always starts counting at 0.
list(range(10))
list(range(9,-1,-1))
"""
Explanation: Lists
End of explanation
"""
#Strange curly brackets
{'Tier': 'Hund', 'Grösse': 124, 'Alter': 10}
dct = {'Tier': 'Hund', 'Grösse': 124, 'Alter': 10}
dct
dct['Grösse']
#List of dictionaries
dct_lst = [{'Tier': 'Hund', 'Grösse': 124, 'Alter': 10}, {'Tier': 'Katze', 'Grösse': 130, 'Alter': 8}]
type(dct_lst)
dct_lst[1]
dct_lst[0]['Alter']
neue_list = []
for xxxxxxxxxxxx in dct_lst:
neue_list.append(xxxxxxxxxxxx['Alter'])
neue_list
"""
Explanation: Dictionaries
Use curly brackets here.
End of explanation
"""
lst
tuple(lst)
lst
lst = tuple(lst)
lst
#Immutable. So a good format for storing things away.
#But this is really just for completeness.
"""
Explanation: Tuples
Here, round brackets (parentheses) are king.
End of explanation
"""
#len with strings
len('hallo wie geht es Dir')
#len with lists
len([1,2,3,4,4,5])
#len with dictionaries
len({'Tier': 'Hund', 'Alter': 345})
#len with tuples
len((1,1,1,2,2,1))
#sorted for a one-off, temporary sort
sorted('hallo wie geht es Dir')
a = 'hallo wie geht es Dir'
sorted(a)
a
#sort, however, only works with lists
lst = [1, 5, 9, 10, 34, 12, 12, 14]
lst.sort()
lst
dic = {'Tier': 'Hund', 'Alter': 345}
dic.sort()
"""
Explanation: Simple functions - len and sort
Note how you call them: with round brackets (parentheses).
End of explanation
"""
lst
for hghjgfjhf in lst:
    print(hghjgfjhf)
dicbkjghkg = {'Tier': 'Hund', 'Alter': 345}
for key, value in dicbkjghkg.items():
print(key, value)
#for loop to make new lists
lst
#Suppose we only want the even numbers in the list
new_lst = []
for elem in lst:
if elem % 2 == 0:
new_lst.append(elem)
# else:
# continue
new_lst
"""
Explanation: For Loop
End of explanation
"""
dic_lst = [{'Animal': 'Dog', 'Size': 45},
{'Animal': 'Cat', 'Size': 23},
{'Animal': 'Bird', 'Size': 121212}]
for dic in dic_lst:
print(dic)
for dic in dic_lst:
print(dic['Animal'])
for dic in dic_lst:
    print(dic['Animal'] + ': ' + str(dic['Size']))
"""
Explanation: For loop with list of dictionaries
End of explanation
"""
|
BYUFLOWLab/MDOnotebooks | AD.ipynb | mit | from math import pi
import numpy as np
from math import sin, cos, acos, exp, sqrt
def inductionFactors(r, chord, Rhub, Rtip, phi, cl, cd, B,
Vx, Vy, useCd, hubLoss, tipLoss, wakerotation):
"""Computes induction factors and residual error at a given location
on the blade. Full details on inputs/outputs ommitted here."""
sigma_p = B/2.0/pi*chord/r
sphi = sin(phi)
cphi = cos(phi)
# resolve into normal and tangential forces
if not useCd:
cn = cl*cphi
ct = cl*sphi
else:
cn = cl*cphi + cd*sphi
ct = cl*sphi - cd*cphi
# Prandtl's tip and hub loss factor
Ftip = 1.0
if tipLoss:
factortip = B/2.0*(Rtip - r)/(r*abs(sphi))
Ftip = 2.0/pi*acos(exp(-factortip))
Fhub = 1.0
if hubLoss:
factorhub = B/2.0*(r - Rhub)/(Rhub*abs(sphi))
Fhub = 2.0/pi*acos(exp(-factorhub))
F = Ftip * Fhub
# bem parameters
k = sigma_p*cn/4.0/F/sphi/sphi
kp = sigma_p*ct/4.0/F/sphi/cphi
# compute axial induction factor
if phi > 0.0: # momentum/empirical
# update axial induction factor
if k <= 2.0/3.0: # momentum state
a = k/(1+k)
else: # Glauert(Buhl) correction
g1 = 2.0*F*k - (10.0/9-F)
g2 = 2.0*F*k - (4.0/3-F)*F
g3 = 2.0*F*k - (25.0/9-2*F)
if abs(g3) < 1e-6: # avoid singularity
a = 1.0 - 1.0/2.0/sqrt(g2)
else:
a = (g1 - sqrt(g2)) / g3
else: # propeller brake region (a and ap not directly used but update anyway)
if k > 1.0:
a = k/(k-1.0)
else:
a = 0.0 # dummy value
# compute tangential induction factor
ap = kp/(1.0-kp)
if not wakerotation:
ap = 0.0
kp = 0.0
# error function
lambda_r = Vy/Vx
if phi > 0: # momentum/empirical
fzero = sphi/(1.0-a) - cphi/lambda_r*(1.0-kp)
else: # propeller brake region
fzero = sphi*(1.0-k) - cphi/lambda_r*(1.0-kp)
return fzero, a, ap
"""
Explanation: Automatic Differentiation
This example demonstrates automatic differentiation using both an operator overloading method and a source code transformation method. The function we will use is reasonably complex. It is a routine from my blade element momentum code (available here). The full code is much more involved, but we will focus on just one function to keep things simpler. The actual routine is written in Fortran but I've converted it into Python for this demonstration. The function has several inputs, and three outputs. For simplicity we will focus on just the derivatives of the first output, although all are available.
End of explanation
"""
# wrap function
def function(x, params):
# unpack variables
r, chord, Rhub, Rtip, phi, cl, cd, Vx, Vy = x
B, useCd, hubLoss, tipLoss, wakerotation = params
# call the original function
return inductionFactors(r, chord, Rhub, Rtip, phi, cl, cd, B,
Vx, Vy, useCd, hubLoss, tipLoss, wakerotation)
"""
Explanation: Just for convenience, we are going to wrap this function. Our wrapper will take two inputs: x and params. The x vector will contain all the variables that we want to take derivatives with respect to. The params are parameters that do not change during a simulation and so we don't need derivatives with respect to them (these are things like B the number of blades, and various boolean options like useCd).
End of explanation
"""
# setup inputs
r = 0.5
chord = 0.1
Rhub = 0.1
Rtip = 1.0
phi = 0.2
cl = 0.3
cd = 0.002
B = 3
Vx = 1.0
Vy = 5.0
useCd = True
hubLoss = True
tipLoss = True
wakerotation = True
x = np.array([r, chord, Rhub, Rtip, phi, cl, cd, Vx, Vy])
params = np.array([B, useCd, hubLoss, tipLoss, wakerotation])
n = len(x)
"""
Explanation: Now we setup some values for the inputs and parameters and put them into a variable array and an parameter array. Also we will save n which is the number of variables in x.
End of explanation
"""
# ------ finite difference --------
output, a, ap = function(x, params) # we are ignoring the other outputs although we could easily get their derivatives as well
g_fd = np.zeros(n) # initialize gradient vector for finite difference
for i in range(n): # iterate across all vars
# step size
step = 1e-6*x[i]
# take a step
xplus = np.copy(x)
xplus[i] += step
output_plus, a, ap = function(xplus, params)
g_fd[i] = (output_plus - output) / step
"""
Explanation: First, let's find the derivatives of the first output (fzero) with respect to x using finite differencing. We already know how to do this.
End of explanation
"""
# You can ignore this. I'm just ignoring a printed FutureWarning about a change affecting something internal to algopy
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
from algopy import UTPM # just the name of the algorithm (stands for univariate Taylor propagation of matrices)
from algopy import sin, cos, exp, sqrt # overloaded versions of functions we use
from algopy import arccos as acos # need to rename b.c. using the math version (acos) whereas numpy uses arccos
# create an algopy version of x
x_algopy = UTPM.init_jacobian(x)
# create an algopy version of outputs
output, a, ap = function(x_algopy, params)
# extract the gradients
g_ad_oo = UTPM.extract_jacobian(output) # could call again for the other outputs
"""
Explanation: Now let's compute exact derivatives using automatic differentiation (AD). We will use the algopy module in Python. This is an operator overloading method. If you are using Matlab there are not any AD methods built in, but you can find some 3rd party tools.
To use algopy properly we need to import overloaded versions of the functions we are using (sin, cos, etc.). These overloaded versions keep track of partial derivatives through the chain rule.
End of explanation
"""
from _bem import inductionfactors_dv
# get derivative of each input
I = np.eye(n)
dr = I[0, :]
dchord = I[1, :]
dRhub = I[2, :]
dRtip = I[3, :]
dphi = I[4, :]
dcl = I[5, :]
dcd = I[6, :]
dVx = I[7, :]
dVy = I[8, :]
fzero, a, ap, doutput_dx, da_dx, dap_dx = inductionfactors_dv(r, chord, Rhub, Rtip,
phi, cl, cd, B, Vx, Vy, dr, dchord, dRhub, dRtip, dphi, dcl, dcd, dVx, dVy)
# rename the gradient
g_ad_sc = doutput_dx
"""
Explanation: That's it! We now have numerically exact derivatives of our output w.r.t. all inputs in x. (We will show accuracy comparisons at the end.)
algopy is pretty easy to use, but I rarely use it myself for two main reasons. 1) It's very slow. Overloaded methods are already slow, and pure Python itself is slow, so for large problems there is a significant slow down. 2) algopy can handle most things in numpy but it's not as versatile and powerful as some other tools. The tool I use most frequently for AD is Tapenade, which works for Fortran and C code. This is nice because Python can call Fortran and C code fairly easily. I use Tapenade because 1) Fortran is fast and callable from Python so we can move computational bottlenecks to Fortran and still have an easy to use wrapper in Python. Tapenade also uses a source code transformation method, which keeps the AD part fast as well. 2) Tapenade is powerful and can handle forward and reverse mode, vectorized modes, loops, etc.
The Fortran version of the function at the beginning is reproduced below:
```Fortran
subroutine inductionFactors(r, chord, Rhub, Rtip, phi, cl, cd, B, &
Vx, Vy, useCd, hubLoss, tipLoss, wakerotation, &
fzero, a, ap)
implicit none
integer, parameter :: dp = kind(0.d0)
! in
real(dp), intent(in) :: r, chord, Rhub, Rtip, phi, cl, cd
integer, intent(in) :: B
real(dp), intent(in) :: Vx, Vy
logical, intent(in) :: useCd, hubLoss, tipLoss, wakerotation
!f2py logical, optional, intent(in) :: useCd = 1, hubLoss = 1, tipLoss = 1, wakerotation = 1
! out
real(dp), intent(out) :: fzero, a, ap
! local
real(dp) :: pi, sigma_p, sphi, cphi, lambda_r
real(dp) :: factortip, Ftip, factorhub, Fhub
real(dp) :: k, kp, cn, ct, F
real(dp) :: g1, g2, g3
! constants
pi = 3.1415926535897932_dp
sigma_p = B/2.0_dp/pi*chord/r
sphi = sin(phi)
cphi = cos(phi)
! resolve into normal and tangential forces
if ( .not. useCd ) then
cn = cl*cphi
ct = cl*sphi
else
cn = cl*cphi + cd*sphi
ct = cl*sphi - cd*cphi
end if
! Prandtl's tip and hub loss factor
Ftip = 1.0_dp
if ( tipLoss ) then
factortip = B/2.0_dp*(Rtip - r)/(r*abs(sphi))
Ftip = 2.0_dp/pi*acos(exp(-factortip))
end if
Fhub = 1.0_dp
if ( hubLoss ) then
factorhub = B/2.0_dp*(r - Rhub)/(Rhub*abs(sphi))
Fhub = 2.0_dp/pi*acos(exp(-factorhub))
end if
F = Ftip * Fhub
! bem parameters
k = sigma_p*cn/4.0_dp/F/sphi/sphi
kp = sigma_p*ct/4.0_dp/F/sphi/cphi
! compute axial induction factor
if (phi > 0) then ! momentum/empirical
! update axial induction factor
if (k <= 2.0_dp/3.0) then ! momentum state
a = k/(1+k)
else ! Glauert(Buhl) correction
g1 = 2.0_dp*F*k - (10.0_dp/9-F)
g2 = 2.0_dp*F*k - (4.0_dp/3-F)*F
g3 = 2.0_dp*F*k - (25.0_dp/9-2*F)
if (abs(g3) < 1e-6_dp) then ! avoid singularity
a = 1.0_dp - 1.0_dp/2.0/sqrt(g2)
else
a = (g1 - sqrt(g2)) / g3
end if
end if
else ! propeller brake region (a and ap not directly used but update anyway)
if (k > 1) then
a = k/(k-1)
else
a = 0.0_dp ! dummy value
end if
end if
! compute tangential induction factor
ap = kp/(1-kp)
if (.not. wakerotation) then
ap = 0.0_dp
kp = 0.0_dp
end if
! error function
lambda_r = Vy/Vx
if (phi > 0) then ! momentum/empirical
fzero = sphi/(1-a) - cphi/lambda_r*(1-kp)
else ! propeller brake region
fzero = sphi*(1-k) - cphi/lambda_r*(1-kp)
end if
end subroutine inductionFactors
```
We then run this code through Tapenade and it creates a source code transformed version that computes derivatives in addition to function values (I've used a forward mode in this case). It's not pretty to look at, but it's automatically generated. If we change the original source, we would need to regenerate.
```fortran
! Generated by TAPENADE (INRIA, Tropics team)
! Tapenade 3.9 (r5096) - 24 Feb 2014 16:54
!
! Differentiation of inductionfactors in forward (tangent) mode:
! variations of useful results: ap fzero a
! with respect to varying inputs: r rtip rhub chord phi cd cl
! vx vy
! RW status of diff variables: r:in rtip:in ap:out rhub:in chord:in
! fzero:out phi:in cd:in cl:in vx:in vy:in a:out
SUBROUTINE INDUCTIONFACTORS_DV(r, chord, rhub, rtip, phi, cl, cd, b, &
vx, vy, usecd, hubloss, tiploss, wakerotation, &
rd, chordd, rhubd, rtipd, phid, cld, cdd, vxd, vyd, &
fzero, a, ap, fzerod, ad, apd, nbdirs)
! Hint: nbdirsmax should be the maximum number of differentiation directions
IMPLICIT NONE
INTEGER, PARAMETER :: dp=KIND(0.d0)
! in
REAL(dp), INTENT(IN) :: r, chord, rhub, rtip, phi, cl, cd
REAL(dp), DIMENSION(nbdirs), INTENT(IN) :: rd, chordd, rhubd, rtipd&
& , phid, cld, cdd
INTEGER, INTENT(IN) :: b
REAL(dp), INTENT(IN) :: vx, vy
REAL(dp), DIMENSION(nbdirs), INTENT(IN) :: vxd, vyd
LOGICAL, INTENT(IN) :: usecd, hubloss, tiploss, wakerotation
INTEGER, intent(in) :: nbdirs
!f2py logical, optional, intent(in) :: useCd = 1, hubLoss = 1, tipLoss = 1, wakerotation = 1
! out
REAL(dp), INTENT(OUT) :: fzero, a, ap
REAL(dp), DIMENSION(nbdirs), INTENT(OUT) :: fzerod, ad, apd
! local
REAL(dp) :: pi, sigma_p, sphi, cphi, lambda_r
REAL(dp), DIMENSION(nbdirs) :: sigma_pd, sphid, cphid, lambda_rd
REAL(dp) :: factortip, ftip, factorhub, fhub
REAL(dp), DIMENSION(nbdirs) :: factortipd, ftipd, factorhubd, fhubd
REAL(dp) :: k, kp, cn, ct, f
REAL(dp), DIMENSION(nbdirs) :: kd, kpd, cnd, ctd, fd
REAL(dp) :: g1, g2, g3
REAL(dp), DIMENSION(nbdirs) :: g1d, g2d, g3d
INTRINSIC KIND
INTRINSIC SIN
INTRINSIC COS
INTRINSIC ABS
INTRINSIC EXP
INTRINSIC ACOS
INTRINSIC SQRT
REAL(dp) :: arg1
REAL(dp), DIMENSION(nbdirs) :: arg1d
REAL(dp) :: result1
REAL(dp), DIMENSION(nbdirs) :: result1d
INTEGER :: nd
REAL(dp) :: abs1d(nbdirs)
REAL(dp) :: abs0d(nbdirs)
REAL(dp) :: abs2
REAL(dp) :: abs1
REAL(dp) :: abs0
! constants
pi = 3.1415926535897932_dp
DO nd=1,nbdirs
sigma_pd(nd) = (bchordd(nd)r/(2.0_dppi)-bchordrd(nd)/(2.0_dppi&
& ))/r2
sphid(nd) = phid(nd)COS(phi)
cphid(nd) = -(phid(nd)SIN(phi))
END DO
sigma_p = b/2.0_dp/pichord/r
sphi = SIN(phi)
cphi = COS(phi)
! resolve into normal and tangential forces
IF (.NOT.usecd) THEN
DO nd=1,nbdirs
cnd(nd) = cld(nd)cphi + clcphid(nd)
ctd(nd) = cld(nd)sphi + clsphid(nd)
END DO
cn = clcphi
ct = clsphi
ELSE
DO nd=1,nbdirs
cnd(nd) = cld(nd)cphi + clcphid(nd) + cdd(nd)sphi + cdsphid(nd&
& )
ctd(nd) = cld(nd)sphi + clsphid(nd) - cdd(nd)cphi - cdcphid(nd&
& )
END DO
cn = clcphi + cdsphi
ct = clsphi - cdcphi
END IF
! Prandtl's tip and hub loss factor
ftip = 1.0_dp
IF (tiploss) THEN
IF (sphi .GE. 0.) THEN
DO nd=1,nbdirs
abs0d(nd) = sphid(nd)
END DO
abs0 = sphi
ELSE
DO nd=1,nbdirs
abs0d(nd) = -sphid(nd)
END DO
abs0 = -sphi
END IF
factortip = b/2.0_dp(rtip-r)/(rabs0)
arg1 = EXP(-factortip)
DO nd=1,nbdirs
factortipd(nd) = (b(rtipd(nd)-rd(nd))rabs0/2.0_dp-b(rtip-r)(&
& rd(nd)abs0+rabs0d(nd))/2.0_dp)/(r*abs0)2
arg1d(nd) = -(factortipd(nd)EXP(-factortip))
IF (arg1 .EQ. 1.0 .OR. arg1 .EQ. (-1.0)) THEN
result1d(nd) = 0.0
ELSE
result1d(nd) = -(arg1d(nd)/SQRT(1.0-arg12))
END IF
ftipd(nd) = 2.0_dpresult1d(nd)/pi
END DO
result1 = ACOS(arg1)
ftip = 2.0_dp/piresult1
ELSE
DO nd=1,nbdirs
ftipd(nd) = 0.0
END DO
END IF
fhub = 1.0_dp
IF (hubloss) THEN
IF (sphi .GE. 0.) THEN
DO nd=1,nbdirs
abs1d(nd) = sphid(nd)
END DO
abs1 = sphi
ELSE
DO nd=1,nbdirs
abs1d(nd) = -sphid(nd)
END DO
abs1 = -sphi
END IF
factorhub = b/2.0_dp(r-rhub)/(rhubabs1)
arg1 = EXP(-factorhub)
DO nd=1,nbdirs
factorhubd(nd) = (b(rd(nd)-rhubd(nd))rhubabs1/2.0_dp-b(r-rhub)&
& (rhubd(nd)abs1+rhubabs1d(nd))/2.0_dp)/(rhubabs1)2
arg1d(nd) = -(factorhubd(nd)EXP(-factorhub))
IF (arg1 .EQ. 1.0 .OR. arg1 .EQ. (-1.0)) THEN
result1d(nd) = 0.0
ELSE
result1d(nd) = -(arg1d(nd)/SQRT(1.0-arg12))
END IF
fhubd(nd) = 2.0_dpresult1d(nd)/pi
END DO
result1 = ACOS(arg1)
fhub = 2.0_dp/piresult1
ELSE
DO nd=1,nbdirs
fhubd(nd) = 0.0
END DO
END IF
f = ftipfhub
DO nd=1,nbdirs
fd(nd) = ftipd(nd)fhub + ftipfhubd(nd)
! bem parameters
kd(nd) = ((((sigma_pd(nd)cn+sigma_pcnd(nd))f/4.0_dp-sigma_pcnfd&
& (nd)/4.0_dp)*sphi/f2-sigma_pcnsphid(nd)/(4.0_dpf))/sphi-&
& sigma_pcnsphid(nd)/(4.0_dpfsphi))/sphi2
kpd(nd) = ((((sigma_pd(nd)ct+sigma_pctd(nd))f/4.0_dp-sigma_pct&
& fd(nd)/4.0_dp)sphi/f2-sigma_pctsphid(nd)/(4.0_dpf))cphi/&
& sphi2-sigma_pctcphid(nd)/(4.0_dpfsphi))/cphi2
END DO
k = sigma_pcn/4.0_dp/f/sphi/sphi
kp = sigma_pct/4.0_dp/f/sphi/cphi
! compute axial induction factor
IF (phi .GT. 0) THEN
! momentum/empirical
! update axial induction factor
IF (k .LE. 2.0_dp/3.0) THEN
DO nd=1,nbdirs
! momentum state
ad(nd) = (kd(nd)(1+k)-kkd(nd))/(1+k)2
END DO
a = k/(1+k)
ELSE
DO nd=1,nbdirs
! Glauert(Buhl) correction
g1d(nd) = 2.0_dp(fd(nd)k+fkd(nd)) + fd(nd)
g2d(nd) = 2.0_dp(fd(nd)k+fkd(nd)) - (4.0_dp/3-f)fd(nd) + fd(&
& nd)f
g3d(nd) = 2.0_dp(fd(nd)k+fkd(nd)) + 2fd(nd)
END DO
g1 = 2.0_dpfk - (10.0_dp/9-f)
g2 = 2.0_dpfk - (4.0_dp/3-f)f
g3 = 2.0_dpfk - (25.0_dp/9-2f)
IF (g3 .GE. 0.) THEN
abs2 = g3
ELSE
abs2 = -g3
END IF
IF (abs2 .LT. 1e-6_dp) THEN
result1 = SQRT(g2)
DO nd=1,nbdirs
! avoid singularity
IF (g2 .EQ. 0.0) THEN
result1d(nd) = 0.0
ELSE
result1d(nd) = g2d(nd)/(2.0SQRT(g2))
END IF
ad(nd) = result1d(nd)/2.0/result12
END DO
a = 1.0_dp - 1.0_dp/2.0/result1
ELSE
result1 = SQRT(g2)
DO nd=1,nbdirs
IF (g2 .EQ. 0.0) THEN
result1d(nd) = 0.0
ELSE
result1d(nd) = g2d(nd)/(2.0SQRT(g2))
END IF
ad(nd) = ((g1d(nd)-result1d(nd))g3-(g1-result1)*g3d(nd))/g3&
& 2
END DO
a = (g1-result1)/g3
END IF
END IF
ELSE IF (k .GT. 1) THEN
! propeller brake region (a and ap not directly used but update anyway)
DO nd=1,nbdirs
ad(nd) = (kd(nd)(k-1)-kkd(nd))/(k-1)2
END DO
a = k/(k-1)
ELSE
! dummy value
a = 0.0_dp
DO nd=1,nbdirs
ad(nd) = 0.0
END DO
END IF
DO nd=1,nbdirs
! compute tangential induction factor
apd(nd) = (kpd(nd)(1-kp)+kpkpd(nd))/(1-kp)2
END DO
ap = kp/(1-kp)
IF (.NOT.wakerotation) THEN
ap = 0.0_dp
kp = 0.0_dp
DO nd=1,nbdirs
apd(nd) = 0.0
kpd(nd) = 0.0
END DO
END IF
DO nd=1,nbdirs
! error function
lambda_rd(nd) = (vyd(nd)vx-vyvxd(nd))/vx2
END DO
lambda_r = vy/vx
IF (phi .GT. 0) THEN
DO nd=1,nbdirs
! momentum/empirical
fzerod(nd) = (sphid(nd)(1-a)+sphiad(nd))/(1-a)2 - (cphid(nd)&
& lambda_r-cphilambda_rd(nd))(1-kp)/lambda_r2 + cphikpd(nd)/&
& lambda_r
END DO
fzero = sphi/(1-a) - cphi/lambda_r(1-kp)
ELSE
DO nd=1,nbdirs
! propeller brake region
fzerod(nd) = sphid(nd)(1-k) - sphikd(nd) - (cphid(nd)lambda_r-&
& cphilambda_rd(nd))(1-kp)/lambda_r2 + cphikpd(nd)/lambda_r
END DO
fzero = sphi(1-k) - cphi/lambda_r*(1-kp)
END IF
END SUBROUTINE INDUCTIONFACTORS_DV
```
We now build this Fortran code into a shared library that I called _bem. I will skip the details because this isn't our focus, but this is fairly easy to do. We can now access this function from Python by importing the library.
This AD method lets us compute combinations of partial derivatives if we want, but generally we just want each derivative separately. There are nine inputs in x and we are using the array version that lets us compute multiple derivatives simultaneously. We will set:
dr = [1, 0, 0, 0, 0, 0, 0, 0, 0]
dchord = [0, 1, 0, 0, 0, 0, 0, 0, 0]
and so on ...
This makes the first entry of doutput_dx equal to doutput_dr, the second to doutput_dchord, and so on.
End of explanation
"""
# import complex versions
from cmath import sin, cos, acos, exp, sqrt
# redefine absolute value
def c_abs(x):
if x.real < 0:
return -x
else:
return x
abs = c_abs
# initialize
g_cs = np.zeros(n)
# iterate across entires in x
for i in range(n):
step_complex = 1e-30 # take a really small step
# new xvalue: x + ih
xcomplex = np.copy(x).astype(complex)
xcomplex[i] += complex(0.0, step_complex)
# call function
output_complex, a_complex, ap_complex = function(xcomplex, params)
# compute gradient
g_cs[i] = output_complex.imag / step_complex
"""
Explanation: This approach is a little more work, but it's much faster and not difficult once you've done it a few times.
How do we check that we did it correctly? Comparing against finite difference is ok, but the best way is to compare against complex step if possible because it is also exact.
To compute the gradients using complex step we need to import complex versions of our functions. We will also need to redefine the absolute value function because the complex version of absolute value$^1$ is the square root of the sum of squares of the real and imaginary part, which is not what we want. You can find more details on functions that need to be overloaded in complex step here.
$^1$ Recall that absolute value is not differentiable at 0, and is generally best to avoid if possible. In this particular code it is fine because I know that the argument will never be zero. It can be negative or positive depending on the operating conditions, but will never cross over to zero.
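As a quick, self-contained sanity check of the complex-step formula f'(x) ≈ Im(f(x + ih))/h (a toy example, not part of the original code), you can compare it to an analytic derivative on a simple function:
```python
# Complex-step derivative of f(x) = x*sin(x) at x0, compared to the analytic result.
import cmath, math

def f_simple(x):
    return x*cmath.sin(x)        # works for real and complex arguments

x0 = 0.7
h = 1e-30                        # tiny imaginary step; no subtractive cancellation
deriv_cs = f_simple(complex(x0, h)).imag / h
deriv_exact = math.sin(x0) + x0*math.cos(x0)
print('complex step: %.15f, analytic: %.15f' % (deriv_cs, deriv_exact))
```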
End of explanation
"""
from __future__ import print_function
print('error_fd =', (g_fd - g_cs)/g_cs)
"""
Explanation: Let's see how we did. We will compare our errors relative to complex step. First finite differencing:
End of explanation
"""
print('error_ad_oo =', (g_ad_oo - g_cs)/g_cs)
print('error_ad_sc =', (g_ad_sc - g_cs)/g_cs)
"""
Explanation: The errors are pretty small, except for the third entry, which is really bad. If we use a different step size for that one entry, we can do a little better. It turns out the function is very insensitive to Rhub at this point, so getting an accurate gradient with FD is difficult.
Let's now look at the two AD methods: one with operator overloading and one with source code transformation.
End of explanation
"""
|
setiQuest/ML4SETI | tutorials/Step_4_Classify_with_WatsonVR.ipynb | apache-2.0 | #!pip install --user --upgrade watson-developer-cloud
#Making a local folder to put my data.
#NOTE: YOU MUST do something like this on a Spark Enterprise cluster at the hackathon so that
#you can put your data into a separate local file space. Otherwise, you'll likely collide with
#your fellow participants.
my_team_name_data_folder = 'my_team_name_data_folder'
mydatafolder = os.environ['PWD'] + '/' + my_team_name_data_folder + '/zipfiles'
if os.path.exists(mydatafolder) is False:
os.makedirs(mydatafolder)
!ls -al $mydatafolder
from __future__ import division
import cStringIO
import glob
import json
import numpy
import os
import re
import requests
import time
import timeit
import zipfile
import copy
from random import randint
import matplotlib.pyplot as plt
import numpy as np
import ibmseti
from watson_developer_cloud import VisualRecognitionV3
apiVer = VisualRecognitionV3.latest_version #'2016-05-20'
classifier_prefix = 'setisignals'
#You can sign up with WatsonVR through Bluemix to get a key
#However, Hackathon participants will be provided with a WATSON VR key that has more free API calls per day.
apiKey = 'WATSON-VISUAL-RECOGNITION-API-KEY'
# TODO: remove before publication
apiKey = ''
"""
Explanation: Watson Visual Recognition Training with Spectrogram Images from SETI Signal Data
https://www.ibm.com/watson/developercloud/visual-recognition/api/v3/
https://www.ibm.com/watson/developercloud/doc/visual-recognition/customizing.html
https://github.com/watson-developer-cloud/python-sdk
https://github.com/watson-developer-cloud/python-sdk/blob/master/watson_developer_cloud/visual_recognition_v3.py
<hr>
Install the Watson Developer Cloud Python SDK
Install the Python SDK if has not been previously installed !pip install --upgrade watson-developer-cloud
Restart the kernel, after installing the SDK
End of explanation
"""
vr = VisualRecognitionV3(apiVer, api_key=apiKey)
"""
Explanation: <br/>
Init the Watson Visual Recognition Python Library
you may need to install the SDK first: !pip install --upgrade watson-developer-cloud
you will need the API key from the Watson Visual Recognition Service
End of explanation
"""
## View all of your classifiers
classifiers = vr.list_classifiers()
print json.dumps(classifiers, indent=2)
## Run this cell ONLY IF you want to REMOVE all classifiers
# Otherwise, the subsequent cell will append images to the `classifier_prefix` classifier
classifiers = vr.list_classifiers()
for c in classifiers['classifiers']:
vr.delete_classifier(c['classifier_id'])
classifiers = vr.list_classifiers()
print json.dumps(classifiers, indent=2)
#Create new classifier, or get the ID for the latest SETISIGNALS classifier
classifier_id = None
classifier = None
classifiers = vr.list_classifiers()
for c in classifiers['classifiers']:
if c['status'] == 'ready' and (classifier_prefix in c['classifier_id']):
classifier_id = c['classifier_id']
if classifier_id is not None:
classifier = vr.get_classifier(classifier_id)
print '\r\nFound classifer:\r\n\r\n{}'.format(json.dumps(classifier, indent=2))
else:
print 'No custom classifier available\r\n'
print(json.dumps(classifiers, indent=2))
"""
Explanation: <br/>
Look For Existing Custom Classifier
Use an existing custom classifier (and update) if one exists, else a new custom classifier will be created
End of explanation
"""
squiggle = sorted(glob.glob('{}/classification_*_squiggle.zip'.format(mydatafolder)))
narrowband = sorted(glob.glob('{}/classification_*_narrowband.zip'.format(mydatafolder)))
narrowbanddrd = sorted(glob.glob('{}/classification_*_narrowbanddrd.zip'.format(mydatafolder)))
noise = sorted(glob.glob('{}/classification_*_noise.zip'.format(mydatafolder)))
sq = len(squiggle)
nb = len(narrowband)
nd = len(narrowbanddrd)
ns = len(noise)
## Possible todo here: Try using the 'noise' as a "negative" example when training Watson. See the Watson documentation.
num = max(sq, nb, nd, ns)
#num = max(sq, nb, nd)
if classifier_id is None:
print 'Adding custom classifier ... this may take awhile'
else:
print 'Updating custom classifier {} ... this may take awhile'.format(classifier_id)
for i in range(num):
squiggle_p = open(squiggle[i], 'rb') if i < sq else None
narrowband_p = open(narrowband[i], 'rb') if i < nb else None
narrowbanddrd_p = open(narrowbanddrd[i], 'rb') if i < nd else None
noise_p = open(noise[i], 'rb') if i < ns else None
if classifier_id is None:
# print 'Creating with\r\n{}\r\n{}\r\n{}\r'.format(squiggle_p, narrowband_p, narrowbanddrd_p) #use this line if going to use 'noise' as negative example
print 'Creating with\r\n{}\r\n{}\r\n{}\r\n{}\r'.format(squiggle_p, narrowband_p, narrowbanddrd_p, noise_p)
classifier = vr.create_classifier(
classifier_prefix,
squiggle_positive_examples = squiggle_p,
narrowband_positive_examples = narrowband_p,
narrowbanddrd_positive_examples = narrowbanddrd_p,
noise_positive_examples = noise_p #remove this if going to use noise as 'negative' examples
)
classifier_id = classifier['classifier_id']
else:
print 'Updating with\r\n{}\r\n{}\r\n{}\r\n{}\r'.format(squiggle_p, narrowband_p, narrowbanddrd_p, noise_p)
# print 'Updating with\r\n{}\r\n{}\r\n{}\r'.format(squiggle_p, narrowband_p, narrowbanddrd_p) #use this line if going to use 'noise' as negative example
classifier = vr.update_classifier(
classifier_id,
squiggle_positive_examples = squiggle_p,
narrowband_positive_examples = narrowband_p,
narrowbanddrd_positive_examples = narrowbanddrd_p,
noise_positive_examples = noise_p #remove this if going to use noise as 'negative' examples
)
if squiggle_p is not None:
squiggle_p.close()
if narrowband_p is not None:
narrowband_p.close()
if narrowbanddrd_p is not None:
narrowbanddrd_p.close()
if noise_p is not None:
noise_p.close()
if classifier is not None:
print('Classifier: {}'.format(classifier_id))
status = classifier['status']
startTimer = timeit.default_timer()
while status in ['training', 'retraining']:
print('Status: {}'.format(status))
time.sleep(10)
classifier = vr.get_classifier(classifier_id)
status = classifier['status']
stopTimer = timeit.default_timer()
print '{} took {} minutes'.format('Training' if i == 0 else 'Retraining', int(stopTimer - startTimer) / 60)
print(json.dumps(vr.get_classifier(classifier_id), indent=2))
"""
Explanation: <br/>
Send the Images Archives to the Watson Visual Recognition Service for Training
https://www.ibm.com/watson/developercloud/doc/visual-recognition/customizing.html
https://www.ibm.com/watson/developercloud/visual-recognition/api/v3/
https://github.com/watson-developer-cloud/python-sdk
End of explanation
"""
zz = zipfile.ZipFile(mydatafolder + '/' + 'testset_1_narrowband.zip')
test_list = zz.namelist()
randomSignal = zz.open(test_list[10],'r')
from IPython.display import Image
squigImg = randomSignal.read()
Image(squigImg)
#note - have to 'open' this again because it was already .read() out in the line above
randomSignal = zz.open(test_list[10],'r')
url_result = vr.classify(images_file=randomSignal, classifier_ids=classifier_id, threshold=0.0)
print(json.dumps(url_result, indent=2))
"""
Explanation: <br/>
Take a Random Data File for Testing
Take a random data file from the test set
Create a Spectrogram Image
End of explanation
"""
#Create a dictionary object to store results from Watson
from collections import defaultdict
class_list = ['squiggle', 'noise', 'narrowband', 'narrowbanddrd']
results_group_by_class = {}
for classification in class_list:
results_group_by_class[classification] = defaultdict(list)
failed_to_classify_uuid_list = []
print classifier_id
results_group_by_class
# locate test archives that were produced in step 3 and add them to the test set
test_set = []
for classification in class_list:
test_set = numpy.concatenate((test_set, sorted(glob.glob('{}/testset_*_{}.zip'.format(mydatafolder, classification)))))
for image_archive_name in test_set:
image_count = 0
# count number of images in <image_archive_name>
with zipfile.ZipFile(image_archive_name,'r') as image_archive:
images = image_archive.namelist()
image_count = len(images)
# bulk classify images in <image_archive_name>
with open(image_archive_name, 'rb') as images_file:
print 'Running test ({} images) for {}... this may take a while.'.format(image_count, image_archive_name)
startTimer = timeit.default_timer()
classify_results = vr.classify(images_file=images_file, classifier_ids=[classifier_id], threshold=0.0)
# print(json.dumps(classify_results, indent=2))
# identify class from ZIP file name, e.g. testset_10_squiggle.zip
mo = re.match('^(.+)_(\d+)_(.+)\.zip$',image_archive_name.split('/')[-1])
classification = mo.group(3)
resdict = results_group_by_class[classification]
passed = 0
for classify_result in classify_results['images']:
pngfilename = classify_result['image'].split('/')[-1]
uuid = pngfilename.split('.')[0]
maxscore = 0
maxscoreclass = None
if "error" in classify_result:
# print error information
print classify_result
#add to failed list
failed_to_classify_uuid_list.append(uuid)
else:
classifiers_arr = classify_result['classifiers']
score_list = []
for classifier_result in classifiers_arr:
for class_result in classifier_result['classes']:
score_list.append((class_result['class'],class_result['score']))
if class_result['score'] > maxscore:
maxscore = class_result['score']
maxscoreclass = class_result['class']
#sort alphabetically
score_list.sort(key = lambda x: x[0])
score_list = map(lambda x:x[1], score_list)
if maxscoreclass is None:
print 'Failed: {} - Actual: {}, No classification returned'.format(pngfilename, classification)
#print(json.dumps(classify_result, indent=2))
elif maxscoreclass != classification:
print 'Failed: {} - Actual: {}, Watson Predicted: {} ({})'.format(pngfilename, classification, maxscoreclass, maxscore)
else:
passed += 1
print 'Passed: {} - Actual: {}, Watson Predicted: {} ({})'.format(pngfilename, classification, maxscoreclass, maxscore)
if maxscoreclass is not None:
resdict['signal_classification'].append(classification)
resdict['uuid'].append(uuid)
resdict['watson_class'].append(maxscoreclass)
resdict['watson_class_score'].append(maxscore)
resdict['scores'].append(score_list)
else:
#add to failed list
failed_to_classify_uuid_list.append(uuid)
stopTimer = timeit.default_timer()
print 'Test Score: {}% ({} of {} Passed)'.format(int((float(passed) / image_count) * 100), passed, image_count)
print 'Tested {} images in {} minutes'.format(image_count, int(stopTimer - startTimer) / 60)
print "DONE."
import pickle
pickle.dump(results_group_by_class, open(mydatafolder + '/' + "watson_results.pickle", "w"))
watson_results = pickle.load(open(mydatafolder + '/' + "watson_results.pickle","r"))
# reorganize the watson_results dictionary to extract
# a list of [true_class, [scores], estimated_class] and
# use these for measuring our model's performance
class_scores = []
for k in watson_results.keys():
class_scores += zip(watson_results[k]['uuid'], watson_results[k]['signal_classification'], watson_results[k]['scores'], watson_results[k]['watson_class'] )
class_scores[100]
from sklearn.metrics import classification_report
import sklearn
y_train = [x[1] for x in class_scores]
y_pred = [x[3] for x in class_scores]
y_prob = [x[2] for x in class_scores]
#we normalize the Watson score values to 1 in order to use them in the log_loss calculation even though the Watson VR scores are not true class prediction probabilities
y_prob = map(lambda x: (x, sum(x)), y_prob)
y_prob = map(lambda x: [y / x[1] for y in x[0]], y_prob)
print sklearn.metrics.classification_report(y_train,y_pred)
print sklearn.metrics.confusion_matrix(y_train,y_pred)
print("Classification accuracy: %0.6f" % sklearn.metrics.accuracy_score(y_train,y_pred) )
print("Log Loss: %0.6f" % sklearn.metrics.log_loss(y_train,y_prob) )
"""
Explanation: <br/>
Run the Complete Test Set
End of explanation
"""
import csv
my_output_results = my_team_name_data_folder + '/' + 'watson_scores.csv'
with open(my_output_results, 'w') as csvfile:
fwriter = csv.writer(csvfile, delimiter=',')
for row in class_scores:
fwriter.writerow([row[0]] + row[2])
!cat $my_team_name_data_folder/watson_scores.csv
"""
Explanation: Generate CSV file for Scoreboard
Here's an example of what the CSV file should look like for submission to the scoreboard. Although, in this case, we only have 4 classes instead of 7.
NOTE: This uses the PNG files created in the Step 3 notebook, which only contain the BASIC4 data set. The code challenge and hackathon will be based on the Primary Data Set which contains 7 signal classes
This only shows you how to create a csv file. You'll need to take the primary test set data, create PNGs for them, package them into zips, then modify the code above to send those zip files to Watson
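If you need a starting point for the packaging step, the sketch below bundles a folder of PNGs into fixed-size zip archives using the zipfile module imported earlier; the folder name, archive naming scheme, and chunk size are placeholder assumptions, not part of the original notebook:
```python
# Minimal sketch: bundle PNGs of the primary test set into zip archives for Watson.
import glob, zipfile

png_folder = mydatafolder + '/primary_test_pngs'   # placeholder: wherever you saved the PNGs
pngs = sorted(glob.glob(png_folder + '/*.png'))
chunk_size = 100                                   # keep each archive reasonably small
for start in range(0, len(pngs), chunk_size):
    archive_name = '{}/primary_testset_{}.zip'.format(mydatafolder, start // chunk_size)
    with zipfile.ZipFile(archive_name, 'w') as zz:
        for png in pngs[start:start + chunk_size]:
            zz.write(png, arcname=png.split('/')[-1])
```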
End of explanation
"""
|
TutsWiki/source | static/QuestDB.ipynb | mit | import requests
import urllib.parse as par
q = 'create table weather'\
'(temp int,'\
'rain24H double,'\
'thunder boolean,'\
'timestamp timestamp)'\
'timestamp(timestamp)'
r = requests.get("http://localhost:9000/exec?query=" + q)
print(r.status_code)
"""
Explanation: Creating a Database in QuestDB
We would generate some random data and store that data into a test database named weather which we would create.
The create statement in QuestDB pushes the data into the bottom of the table.
Our data is comprised of:
* temp is the temperature in Celcius.
* rain24H is the amount of precipitation in last 24 hours.
* thunder is a boolean returning True if thunder is present.
* timestamp is the date and time.
If the below code block prints 200, it means the database was created successfully.
If it prints 400 then it means that database already exists.
End of explanation
"""
import requests
import random
from datetime import datetime
success = 0
fail = 0
random.seed()
for x in range(1000):
temp = random.randint(-40, 55)
rain24H = round(random.uniform(10.45, 235.15), 2)
thunder = bool(random.getrandbits(1))
query = "insert into weather values("\
+ str(temp) + ","\
+ str(rain24H) + "," \
+ str(thunder) +",systimestamp())"
r = requests.get("http://localhost:9000/exec?query=" + query)
if r.status_code == 200:
success += 1
else:
fail += 1
print("Rows inserted: " + str(success))
if fail > 0:
print("Rows Failed: " + str(fail))
"""
Explanation: Adding data to our Database
In the next code cell we generate and add 1000 entries of data to our database.
End of explanation
"""
import requests
import io
r = requests.get("http://localhost:9000/exp?query=select * from weather")
rawData = r.text
print(rawData)
"""
Explanation: Now let's try querying some data!
End of explanation
"""
import pandas as pd
pData = pd.read_csv(io.StringIO(rawData), parse_dates=['timestamp'])
print(pData)
"""
Explanation: Using Pandas to frame our data :
End of explanation
"""
import urllib.parse
q = "select tempF,"\
" rain24H,"\
" timestamp"\
" from weather"\
query = urllib.parse.quote(q)
r = requests.get("http://localhost:9000/exp?query=" + query)
queryData = r.content
rawData = pd.read_csv(io.StringIO(queryData.decode('utf-8')))
print(rawData)
"""
Explanation: *Note: The query string must be URL-encoded before it is sent.
End of explanation
"""
from matplotlib import pyplot as plt
plt.bar(rawData['timestamp'], rawData['rain24H'])
from matplotlib import pyplot as plt
plt.bar(rawData['timestamp'], rawData['tempF'])
"""
Explanation: Plotting our data:
End of explanation
"""
|
rdipietro/tensorflow | tensorflow/tools/docker/notebooks/3_mnist_from_scratch.ipynb | apache-2.0 | from __future__ import print_function
from IPython.display import Image
import base64
Image(data=base64.decodestring("iVBORw0KGgoAAAANSUhEUgAAAMYAAABFCAYAAAARv5krAAAYl0lEQVR4Ae3dV4wc1bYG4D3YYJucc8455yCSSIYrBAi4EjriAZHECyAk3rAID1gCIXGRgIvASIQr8UTmgDA5imByPpicTcYGY+yrbx+tOUWpu2e6u7qnZ7qXVFPVVbv2Xutfce+q7hlasmTJktSAXrnn8vR/3/xXmnnadg1aTfxL3/7rwfSPmT+kf/7vf098YRtK+FnaZaf/SS++OjNNathufF9caiT2v/xxqbTGki/SXyM1nODXv/r8+7Tb+r+lnxZNcEFHEG/e3LnpoINXSh/PWzxCy/F9eWjOnDlLrr/++jR16tQakgylqdOWTZOGFqX5C/5IjXNLjdt7/NTvv/+eTjnllLT//vunr776Kl100UVpueWWq8n10lOmpSmTU5o/f0Fa3DDH1ry9p0/++eefaZ999slYYPS0005LK664Yk2eJ02ekqZNnZx+XzA/LfprYgGxePHitOqqq6YZM2akyfPmzUvXXXddHceoic2EOckxDj300CzPggUL0g033NC3OKy00krDer3pppv6FgcBIjvGUkv9u5paZZVVhoHpl4Mvv/wyhfxDQ0NZ7H7EQbacPHny39Tejzj88ccfacqUKRmHEecYf0Nr8GGAQJ8gMHCMPlH0QMzmEBg4RnN4DVr3CQIDx+gTRQ/EbA6BgWM0h9egdZ8g8PeliD4RutfF/Ouvfz9OtZy8aNGiNH/+/GGWl1122XzseYuVNKtqsaI23Ghw0DYCA8doG8JqO+AUG2+8cVq4cGHaY4890vLLL5/WXXfdfI6jvPDCC3lJ8amnnkoezP3000/pl19+GThHtWpIPekYomTxFS7HnkqKjMsss0yGgFE4r62tSBFVJ02aNPyconi9V4/JwzHwT9ZNNtkkeZ6w5ZZbph133DH99ttv6ccff8zXX3nllcRRnHNfv2cNGMQWGRaOrWbUrjsGBRLAA6U4Lhoqw9h2223ztRBq6aWXzsbgvueffz4Lu9NOO2UnYTgrr7xy7tO9nOH111/Pbb744ov0ww8/jAvngAdFMvQDDjggG/0GG2yQX1GZNm1aziCCwzrrrJPl3muvvXKwePnll9M333wzHDCKWPbLMbuAkfISjnvvvXcW/emnn85lqCBqa4a65hiYR/Gk2RNGRlwm3n7ggQfmdrKD9sqJtdZaKxvCnDlz8n3Tp09PXmPYeuutc0SVNQjvnmuvvTa3efzxx9N33303PGZ5rF75DBvvqq233nrp22+/TWeddVbyikpgxCE4vQDhlQUBRfDw2esbs2fPTquvvnqviNN1PuIdJ4GErVx44YUZowsuuCB9+umn6eeff84BspmsWqljhPFDxjGGYx/lDkN33udajCoVlAjRzl4U8LjefRwnPjsXG8OJqKBd8NB1LTU5IHyCd7LJGOYXNoGjFqaGIKtrERDIDKtukfGMH/zRZa1A101+YBF44KfMYzO8VOYYjDWiukiGqc022yyXOUqdzTffPJ/z1ialeqNVxA9gi0wzlOJ5juJlR8JeddVV+ZrIKTq4ZvJp/8EHH+SU+txzz+W2SqmxVFZRplrH5DTRXmGFFdKuu+6azjjjjOzosl5g6D54CQCI4mGjhNQO5occckh2LvLTA6fqJOEnyhU6kNlkZmUuvrtNcFx77bUzhsZWXgoSsm6t4Dsa/tp2DErCmA04HAI4FLjaaqtlBhmnSKiNY4rDtHZFB6jFMMH0RVDH+nCPYxtDCFJnKkniRbDitWjTK3sykQUuMLPn3DZGX8SFnCG/fVyz5zCCBtIHTLshdzif8fERn8cKXxjCNOwCTu3Qf6yqhV4AQokiP489//zzM0DxnQYKwqAtIkko1kQzFFxvaNcJ6u3Pe+65J/cRRvDee+9lA2BInIyRff/997nNO++8k7t0vl2A6vHWynmyiPJ43WKLLbIijz/++LTddtvlTCdzwIWSg9yjxBJ0GN/DDz+c7zv77LOzbEceeWSekwVGgsOsWbNyNo0+qt7DfPvtt8/dmtvIGnPnzk3PPPPMsJ6rHrNef/BBeJA90RprrJEDcNhctMkXR/mnbccwuCjNGTbaaKMc8TBZprITxOdgOvbuKxqGz6LSJ598kseJ9Gi1CYmSv/76a3YyJZWMZJ6Ceskp8EMusihFEAyUmVaa8G2rxTNHIrd733///eH7YeaLNe5xrEzlWNF/HqQDf0Tm+GIbvYdD43MsKAIo/JDgE0G5aFfN8NaWYxiUshikqGYTTUSt0TCkjXsYNqJQQso+rgGa0vX58ccf56hQTtk+48F92rmvlnE1A0on2uKP0Yrw+Nxzzz0zn+ZhjKwRXq6vueaa2TmUiRQfS7SyNeMks9IV9vrvJOl/q622yo4Mfw5Pvm6TMclLdit6shh+YAMnq1E29tEsteUYBgMSgxa5MOAzJZcVXQs4bUR8XxhCHIwzMALCBuCcx5q0tF3u133l8XrRMchFiRYNyMxBKM/5IjZlWVzjULKwACISytIWFsi56aab5mvOKyEikmdAO/iHY+BDCRUZuoPD1e1akECyLseA7d13352DhdKak8Cmlt3U7TSl9p58FwejYK8ncAwKpDTnGDcARbWiAUjHiNEHsITSPlagpEZChcfrZzwSOfBOiQwXLuR3PjAhtwAD08iAMCO/a+5xPTIm3ALjwERf0V+c69QeT7ZujVdLDhgKBrANXAMreMESRkU7rdVPrXNtZ4xIpSLH1VdfnR3j4IMPzkbw2Wefpa+//jovo5188slZsZjArAcvFP3YY4+lSy+9NEdTdTTy0I5xHHfccfm1CH2LtuORKEqmkwVlVU+sBY+IdJRmE0zeeOONnEXuu+++7AhnnnlmWn/99XMJ5brtzTffzHMJx/o555xzkgdb0U8rRtAKrnTYqtG1Ml6teyxInHDCCdlGYByBmG2Z97ChVvFo2zEwbHCRTbqP7EDxPjN2pUBEe86AXAcsg+f10TYMSTvnRM1ulQe1wG/nHEXZZEJZUIYQ5cgWMsEgMgqclFdkdh+MbFFyuddnWMLNfTYkcuuXHlBkpFYNI3dS+mMMfCHHsZWadfUjmQVn8iLywscG21apMscQwR555JEM3KuvvpoZ5LHOmzgjAvBwzFt2/Oijj3Lm4Ayin/MU/eGHH+b2N998c/5MGSaZ44nw7OEd5Rx77LE5+1EehYXxkpes5li2K6+8Mhv8Lrvsko381ltvzcEBfvHQKh5auk9GPvHEE3NJAx+/eKL/HXbYIQcbK3nwN067xAk4s5VHdbvsx0nxrYQeKxJMZAfBA7GlRx99NC9EtCN7JY4RoPBeAHIAyrB3jpHYwqu1d02d7HpZcfqINo5dL7eJMXtxTzk2sgWFM/gcsnCakI2cFOk+523O+Qw7WaeYHYpYRp9xn4BkbPdWSfgJXYYM+ne+2xRj2sdx8EDu8rm4Ntp9pY4RSmb0CIPOAVNGoLA47yU4S2xen37ppZdy9CkLE/3lm8bJHzJbbiavt2Q9p7AkK7o
yXAZOLk7gs9c4PJC0AOE8DDyrgJkaWgYQkSPYuAdpWySfteU8HhqKouYq+io6ZfGeZo7xpbT1+jt+jGULfprpq922ePHMBibwjWVq523KVrzBsIzTaMeu1DFi0HI0YyyYtAekY5MltbRyihFJiROBKIYTwMCTWJNubwdQFCXFapK9z96mtbjgs3thFKWnUgjBzNZIya5FOyUcPG36q4LwRgZ6Ix8HtBk3tirGGU0feAkslHfk5PzBh2cXSkvtWqWOOEaRGcoSHdXDMoYn1tK8yaON0ahbCWgFS/vxSnjn5F4ItLeiFAGAzCKc7MDA1OlIjc4pLFKE7FEyxb5ZPNTbtuiv2fvrtddfOFsYXcwj8d8qv/XGq3femLvvvnvOvrIYPPEjG+PDseDbDnXcMXiyiGiyyACOPvrovN95552zV3/++ef5zVveznlEo6CICvG5l/d4JSvHP+qoo7JjKDs4PkVSGPm9HSz9W5rlPEoCQYHjVFXyRGnBOcKA28VOP/qTBWX6YnS2IKB8qYL/enyGHPbKziOOOCLj6sGeslGW8L6Y4ANr2MY99fpsdL7jjmFwkSTSr6gDVCk+tmDQedcJ5LgdwaLPbu7xjJRRNlErSsiQhVHJlOEQoh182o1wRTnharwYs3itnWP9Rd/RD5mLW5yveh/YRhYMjItyBh/wjPat8tEVx6B00RKo5513XpIl7rzzzuwEourMmTOz95uIcyBfTSXYiy++mCOrSFS1klsFrNZ9eGPoJtmeyRx00EE5cpGbIi21XnbZZbkMee2117KMHIKMIVcotVb/vXoOz6I0+URoMlVFcBFE7L1+IjNYIo6v/fo+D3tC+FCR+FHuwNUCgfOtUlccI5hnJMoIBhN1sBICqMoNNaLP3pkiFGciIIBC4HaEbRWk0dyHb3Mp/EY0I6+NsytvyKxsKhpQr8ozGpm1IZ8IbV+PyllGuyh1YBXXOQEcy6R8M5eAHzuxxX3GRvbaCKJ4aRfXrjkG5jEbk00Prxi8SZTJKmc5/PDDc5v99tsvC+hBjWtqStmD0F4Ma1foMvDtfqZMUc3/lYjMSFFW3NS7JtyyoKzSiTocHoFJHMc+MlK7Mta7n9NbATJerbEYvQWIWCVitIyaXrV3nsG7H2Y2GVcbxyj6NX+waKEPmOvbfShwtjhQDDz5Ygt/uuoY+OPtnICDEMBTWsAQUu0NBBsDEgFEWOADAiDaVRERWsCq5i34IRN+TbTJgn8KwzOFuR4KDUXW7Kyik53Ep8w/+RkxWeO5S1EM5wVABguXMGp69dk1x87D0ObdL32GHI5tsDQGHtwbm/Hw4TpnKvNY5Ge0x113DEwT3tIsIdSnDIfxcxJAevCHfE9cXcmotHXfAw88kIFUdgFjLMn4HuZRuh9FExmjRCCnZxRqcPxz8ioUVk9eRhJkPAYHV8ZVFRkjjFSfAtw222yTy2OZ0iv15fHcQ4dKaMcwsBdEEL26RzaIh5+yK7LSBGPno8yOZX+vzRhfXzZ8cRrtyzzkzpr803XHwB8wTJYIRol+VY8zqMMBbP0f+cExE1qTdbU7x3jwwQdzVBYdesExKNiEWx2MfwoOAyCbJ9uRHZvUTcPmsENhGNE4HBKOHKNqZzQu3KNfX9H1nRABQZlbNkpt4SNo4DWIIesDj9qYnwki2giWqol3330348kZLPm7xvi1Pffcc7MzhA3gy/0oeIuxWtmPiWNgNCIFYwcCAa2FA1ikJZz1aeUVsBmge9TyoqGoIqKUFdEKCFXcU0/pHJizVMUnXBiBh6IicdTTzsEOnuZkDE/2rcJI4KMf/TF+0TucwDhkZ+DGL4/nGkPGV/AIC+2RvfP6ZPTI4gu5XNM/Um7RPzuIFyn1zW7wpQ9UHj+fbOHPmDlGCOGBGIeQQfwuq0jnISBQfOHft7JEHN94Q5xF6XLFFVfkyKIEGyuiGAo3r6BIx0imcM6k+6GHHspOEQbcDq+UTl4BwRu7PstUiPEJFsa9/PLL83nXg6d2xnUvoxS5L7744uGyh/wyRpRF9YwSHsHjE088kWWADQeRFThZkTgBstensZG5h4m56oEdcAp9CwTOVUlj6hgECcGBpA6XDazeiLKhVABQAhKB3cNxbEAL4KoEppm+gjf3OMafDf+UW7zeTL/ltqIiAxBMOIIxnLOHgbFsMGQ4InhE0nJfrXw2hnIRD3SFBKmYWDfqE49woFvOzZno3NxM0HDciMjBDsjEBgLTsJHYN+qjmWtj7hjBLKFFQgL7qRz14jHHHJPBcC2M3wRPVDT5ohzZRv0Z16O/sdozAKmdopUH5kftTrzJpl+lk29CcgpLw3BgpMbwwqF/S80pGJ6xO0WM+8Ybbxw2TuOEoTYakwyovB/JKdzDMVQOHvCRzXju890fL11aGhcMqqIxdwwCRkYQDZAaE7lWBhyosQEmQM439MgffDHm0Si8EcuBC0ezcQSZVKYktzFEW+3sfQ4natRvu9eMTS9F7IvHo+m/2fb6LNuCc0WsW+mzHq9j6hgE9YCHp5tkez2EAVjlMOmyUlU2Lis8ygVR0rykyoltPZCaOY9fr32Qp50X6xi7pWCGbsHBvwLgGIcddljGxvcsjOU1GseyiKjJQWydpiqNsBlei85BfhNxeJunVCl31x0jBOMAjJ9jRC3OEERDS7QMI0qQohIYgLSq7FJuMZbi9WZA7kRbvFAWx5Dyy449mjEDG/dyDPW4VSiy2iNvBcCSUdxyyy35OYHrqJUx843j8I/qQpA074BVVdR1x+AIHCIiIGewsqIuds41tSSlOxeOFHuOQ/E+2zPEuFYVKM32U3RMvGy44YbZMTg2B2+GOIXXJcjpR9lkUy/QyZ7GUU8zAD9RCiuR0oQYVv1IMAk7qFL+rjkGg7GZQPLufffdN69QKJtkCAKKjNGu1p7gMgWDYEDRpkpAmu0rnMLehie/RavcI49Sr1ZW0w6V91ac/IsxmdHPB0U5pQ+4+TExDudNUhPufnaKIn7N6m2k9h11jKLRqP+UQJb2eHh4uYjK0LW1D0MpCq0NR4g24RTR/0hCdvM6/m14FtljeTL4D/liedFeO7LYcyh7eMGDY8X16IM8Vp9kWjj2GwWG5IZb2FKVOHTMMTCvDKBgD2Z22223bNynnnpqVrZXBFxjQDZUFJiwIqKHN8qHO+64IxvN/fffn9vG/VWC0UpfeC5uZMEbg/ctM/8SzYOxZ599Nhs4ebSx0ECpcDFvMCdRggkesoQ+zaHU0N4EgAEnue2227JTON+LgaEVDFu5h+w2Wdl33GFkEUIQqYIqdYwwbJGO8q2xOydqUiTFWpJVPzsuUwhlzzFETxlGdFSCqaMB4XwvUzgKWU3AyW4uwFns4QMbilUyxbq8p/4cw3UEB8FDGQUDx/acqB8zRS2dw5qthe3VatPKucocg6JiYu3lP2nfawvekKVITzgJQLH24QTBtPZeE2D89957b27jwZ1IwIm8R2OMWHmJ+3pxTzaK8l+HyMrgTzrppMxqOIEsGoZvz0nsyWiliRMUl2G9aOk6POyLZVUvYtBpniL4wA1m9l
VSW46BOQqKpTLK9FnUsxftvW4swssa4dkhCGFCMNfcp08lhM9KKc4h0obgsa8ShHb6Cv5DJnu8IwHB9TB852DkOlzIRV6kXbSVMfQj48BWdhE0TLr1Fe3zQR/+gRMK5yjuq4KjZccQ2SlYjexHmCnSkiLjtsesmlnpQ5naFo1A5GMAHoJxBI709ttv54ygntZWmWEcQMS9VQleRT9kNmfAG0P3HRPGbHnVudg4gEyJOAYiE0wikHAAcxHyxndO4KI/WHEK/Qzo7wjAXfaFNdurikaNtIERRTqmYIYdE2tGEs8hfJ8iFB/3xV67MCjG8NZbb6Unn3wyC+XfDxfnDxFp496qhK6qn5CDA5twK/fIRH5Gb0MMOhxCFgkKjOBoHqKEkmWvueaanG04iTHcP3CKQO0/e3ZhgceP2smqcKyKRuUYlEKhPDL+d5z1c4qVFTDnmBIZMwZ9DiKAzTmvCetPNFR7W7fXXt/KLddqTcyjr17bRybkEF5XiQhPHnMuDlF07MCB3I49l4EDxTrnfsFBJBxQbQSKeGoROqjdurWzIzoGJqRxS2KUf/rpp2flcRDRjRKVCdpFhCwz7rOVKE5z++235/7uuuuuXDq5P5yKEY0np8B3TKb9K1/vLTF0/7MiJtyRPYrq4fx+7R2e7vFDDzDyfx1goPwcUGMEYG/rFI3oGAYW0UUyimQIcRwGzbgpVsZAUTYE065xCtc5GUeSHTyg4kzKs/FKoSBljyhvTz6y2gseZAwlwgI+cNBGtpV9ZRj4BobjFY9O8g0bQcXWaRpxBE5hHuFnJ0XB6dOn56ge2QGDlK2dFSSG4b8kxVzEdSWGVxgYQLzrxJkIGgbTaUE73b9MZ/KNfIMOJpdcckndYZWmFAwv+wgydW/o8wsCK3xnz56dFzx8oxPGtk7QiI5h0FBaeGzRKYIpjDN2ig6lB9OiprmI60qNieIMIXvsQy7yotjH9eI+2hbPDY4bI8D+2JdnWTYY+iwDs78qaUTHEM0sI1pClAVMnqX9ImGQszB6DHoNOLzZNZlGRlEq9JNB9JOsRXvoxDGnsDTudwFUHTNmzMjDqEaU9xYvGgWiZnka0TEo16CeNyCM1SLtwmt5cNEoCOUa5xjQAIFWEGBP5rbKdTRr1qwcfGUMthXVTCt917pnRMdwE6ZiQm0JckADBMYCgWLwtXjTSeq/d5Y7ieag7wmDwMAxJowqB4JUicDAMapEc9DXhEFgcjxcM7vvR4on7bHS1q84WNkpUr/iEL+aOLRw4cIlQCmuIhUBmsjHlpQ9c7EmzjEsN1vd6DeCg8UVT+qRd7b6EQey8wMT+6El8RSu36xhIO8AgQYI9F94bADG4NIAgUDg/wHX+3lgThDIegAAAABJRU5ErkJggg==".encode('utf-8')), embed=True)
"""
Explanation: MNIST from scratch
This notebook walks through an example of training a TensorFlow model to do digit classification using the MNIST data set. MNIST is a labeled set of images of handwritten digits.
An example follows.
End of explanation
"""
import os
from six.moves.urllib.request import urlretrieve
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
WORK_DIRECTORY = "/tmp/mnist-data"
def maybe_download(filename):
"""A helper to download the data files if not present."""
if not os.path.exists(WORK_DIRECTORY):
os.mkdir(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not os.path.exists(filepath):
filepath, _ = urlretrieve(SOURCE_URL + filename, filepath)
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
else:
print('Already downloaded', filename)
return filepath
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
"""
Explanation: We're going to be building a model that recognizes these digits as 5, 0, and 4.
Imports and input data
We'll proceed in steps, beginning with importing and inspecting the MNIST data. This doesn't have anything to do with TensorFlow in particular -- we're just downloading the data archive.
End of explanation
"""
import gzip, binascii, struct, numpy
import matplotlib.pyplot as plt
with gzip.open(test_data_filename) as f:
# Print the header fields.
for field in ['magic number', 'image count', 'rows', 'columns']:
# struct.unpack reads the binary data provided by f.read.
# The format string '>i' decodes a big-endian integer, which
# is the encoding of the data.
print(field, struct.unpack('>i', f.read(4))[0])
# Read the first 28x28 set of pixel values.
# Each pixel is one byte, [0, 255], a uint8.
buf = f.read(28 * 28)
image = numpy.frombuffer(buf, dtype=numpy.uint8)
# Print the first few values of image.
print('First 10 pixels:', image[:10])
"""
Explanation: Working with the images
Now we have the files, but the format requires a bit of pre-processing before we can work with it. The data is gzipped, requiring us to decompress it. And, each of the images is grayscale-encoded with values from [0, 255]; we'll normalize these to [-0.5, 0.5].
Let's try to unpack the data using the documented format:
[offset] [type] [value] [description]
0000 32 bit integer 0x00000803(2051) magic number
0004 32 bit integer 60000 number of images
0008 32 bit integer 28 number of rows
0012 32 bit integer 28 number of columns
0016 unsigned byte ?? pixel
0017 unsigned byte ?? pixel
........
xxxx unsigned byte ?? pixel
Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).
We'll start by reading the first image from the test data as a sanity check.
End of explanation
"""
%matplotlib inline
# We'll show the image and its pixel value histogram side-by-side.
_, (ax1, ax2) = plt.subplots(1, 2)
# To interpret the values as a 28x28 image, we need to reshape
# the numpy array, which is one dimensional.
ax1.imshow(image.reshape(28, 28), cmap=plt.cm.Greys);
ax2.hist(image, bins=20, range=[0,255]);
"""
Explanation: The first 10 pixels are all 0 values. Not very interesting, but also unsurprising. We'd expect most of the pixel values to be the background color, 0.
We could print all 28 * 28 values, but what we really need to do to make sure we're reading our data properly is look at an image.
End of explanation
"""
# Let's convert the uint8 image to 32 bit floats and rescale
# the values to be centered around 0, between [-0.5, 0.5].
#
# We again plot the image and histogram to check that we
# haven't mangled the data.
scaled = image.astype(numpy.float32)
scaled = (scaled - (255 / 2.0)) / 255
_, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(scaled.reshape(28, 28), cmap=plt.cm.Greys);
ax2.hist(scaled, bins=20, range=[-0.5, 0.5]);
"""
Explanation: The large number of 0 values corresponds to the background of the image, another large mass of values at 255 is black, and there is a mix of grayscale transition values in between.
Both the image and histogram look sensible. But, it's good practice when training image models to normalize values to be centered around 0.
We'll do that next. The normalization code is fairly short, and it may be tempting to assume we haven't made mistakes, but we'll double-check by looking at the rendered input and histogram again. Malformed inputs are a surprisingly common source of errors when developing new models.
End of explanation
"""
with gzip.open(test_labels_filename) as f:
# Print the header fields.
for field in ['magic number', 'label count']:
print(field, struct.unpack('>i', f.read(4))[0])
print('First label:', struct.unpack('B', f.read(1))[0])
"""
Explanation: Great -- we've retained the correct image data while properly rescaling to the range [-0.5, 0.5].
Reading the labels
Let's next unpack the test label data. The format here is similar: a magic number followed by a count followed by the labels as uint8 values. In more detail:
[offset] [type] [value] [description]
0000 32 bit integer 0x00000801(2049) magic number (MSB first)
0004 32 bit integer 10000 number of items
0008 unsigned byte ?? label
0009 unsigned byte ?? label
........
xxxx unsigned byte ?? label
As with the image data, let's read the first test set value to sanity check our input path. We'll expect a 7.
End of explanation
"""
IMAGE_SIZE = 28
PIXEL_DEPTH = 255
def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels].
For MNIST data, the number of channels is always 1.
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
# Skip the magic number and dimensions; we know these values.
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)
data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
return data
train_data = extract_data(train_data_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
"""
Explanation: Indeed, the first label of the test set is 7.
Forming the training, testing, and validation data sets
Now that we understand how to read a single element, we can read a much larger set that we'll use for training, testing, and validation.
Image data
The code below is a generalization of our prototyping above that reads the entire test and training data set.
End of explanation
"""
print('Training data shape', train_data.shape)
_, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(train_data[0].reshape(28, 28), cmap=plt.cm.Greys);
ax2.imshow(train_data[1].reshape(28, 28), cmap=plt.cm.Greys);
"""
Explanation: A crucial difference here is how we reshape the array of pixel values. Instead of one image that's 28x28, we now have a set of 60,000 images, each one being 28x28. We also include a number of channels, which for grayscale images as we have here is 1.
Let's make sure we've got the reshaping parameters right by inspecting the dimensions and the first two images. (Again, mangled input is a very common source of errors.)
End of explanation
"""
NUM_LABELS = 10
def extract_labels(filename, num_images):
"""Extract the labels into a 1-hot matrix [image index, label index]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
# Skip the magic number and count; we know these values.
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
# Convert to dense 1-hot representation.
return (numpy.arange(NUM_LABELS) == labels[:, None]).astype(numpy.float32)
train_labels = extract_labels(train_labels_filename, 60000)
test_labels = extract_labels(test_labels_filename, 10000)
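# (Added illustration, not part of the original walkthrough.) The 1-hot
# conversion above relies on numpy broadcasting: comparing the row vector
# numpy.arange(NUM_LABELS) against a column of labels yields a boolean matrix
# with exactly one True per row, which we cast to float32. The label values
# below are hypothetical, chosen just to make the pattern visible.
example_labels = numpy.array([2, 0, 9], dtype=numpy.uint8)
print((numpy.arange(NUM_LABELS) == example_labels[:, None]).astype(numpy.float32))
# Expected: row 0 has its 1 in column 2, row 1 in column 0, row 2 in column 9.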
"""
Explanation: Looks good. Now we know how to index our full set of training and test images.
Label data
Let's move on to loading the full set of labels. As is typical in classification problems, we'll convert our input labels into a 1-hot encoding over a length 10 vector corresponding to 10 digits. The vector [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], for example, would correspond to the digit 1.
End of explanation
"""
print('Training labels shape', train_labels.shape)
print('First label vector', train_labels[0])
print('Second label vector', train_labels[1])
"""
Explanation: As with our image data, we'll double-check that our 1-hot encoding of the first few values matches our expectations.
End of explanation
"""
VALIDATION_SIZE = 5000
validation_data = train_data[:VALIDATION_SIZE, :, :, :]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, :, :, :]
train_labels = train_labels[VALIDATION_SIZE:]
train_size = train_labels.shape[0]
print('Validation shape', validation_data.shape)
print('Train size', train_size)
"""
Explanation: The 1-hot encoding looks reasonable.
Segmenting data into training, test, and validation
The final step in preparing our data is to split it into three sets: training, test, and validation. This isn't the format of the original data set, so we'll take a small slice of the training data and treat that as our validation set.
End of explanation
"""
import tensorflow as tf
# We'll bundle groups of examples during training for efficiency.
# This defines the size of the batch.
BATCH_SIZE = 60
# We have only one channel in our grayscale images.
NUM_CHANNELS = 1
# The random seed that defines initialization.
SEED = 42
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step, which we'll write once we define the graph structure.
train_data_node = tf.placeholder(
tf.float32,
shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
train_labels_node = tf.placeholder(tf.float32,
shape=(BATCH_SIZE, NUM_LABELS))
# For the validation and test data, we'll just hold the entire dataset in
# one constant node.
validation_data_node = tf.constant(validation_data)
test_data_node = tf.constant(test_data)
# The variables below hold all the trainable weights. For each, the
# parameter defines how the variables will be initialized.
conv1_weights = tf.Variable(
tf.truncated_normal([5, 5, NUM_CHANNELS, 32], # 5x5 filter, depth 32.
stddev=0.1,
seed=SEED))
conv1_biases = tf.Variable(tf.zeros([32]))
conv2_weights = tf.Variable(
tf.truncated_normal([5, 5, 32, 64],
stddev=0.1,
seed=SEED))
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]))
fc1_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
stddev=0.1,
seed=SEED))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[512]))
fc2_weights = tf.Variable(
tf.truncated_normal([512, NUM_LABELS],
stddev=0.1,
seed=SEED))
fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))
print('Done')
"""
Explanation: Defining the model
Now that we've prepared our data, we're ready to define our model.
The comments describe the architecture, which is fairly typical of models that process image data. The raw input passes through several convolution and max pooling layers with rectified linear activations before several fully connected layers and a softmax loss for predicting the output class. During training, we use dropout.
We'll separate our model definition into three steps:
Defining the variables that will hold the trainable weights.
Defining the basic model graph structure described above. And,
Stamping out several copies of the model graph for training, testing, and validation.
We'll start with the variables.
End of explanation
"""
def model(data, train=False):
"""The Model definition."""
# 2D convolution, with 'SAME' padding (i.e. the output feature map has
# the same size as the input). Note that {strides} is a 4D array whose
# shape matches the data layout: [image index, y, x, depth].
conv = tf.nn.conv2d(data,
conv1_weights,
strides=[1, 1, 1, 1],
padding='SAME')
# Bias and rectified linear non-linearity.
relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
# Max pooling. The kernel size spec ksize also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
conv = tf.nn.conv2d(pool,
conv2_weights,
strides=[1, 1, 1, 1],
padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool.get_shape().as_list()
reshape = tf.reshape(
pool,
[pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
return tf.matmul(hidden, fc2_weights) + fc2_biases
print('Done')
"""
Explanation: Now that we've defined the variables to be trained, we're ready to wire them together into a TensorFlow graph.
We'll define a helper to do this, model, which will return copies of the graph suitable for training and testing. Note the train argument, which controls whether or not dropout is used in the hidden layer. (We want to use dropout only during training.)
End of explanation
"""
# Training computation: logits + cross-entropy loss.
logits = model(train_data_node, True)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits, train_labels_node))
# L2 regularization for the fully connected parameters.
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# Optimizer: set up a variable that's incremented once per batch and
# controls the learning rate decay.
batch = tf.Variable(0)
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
train_size, # Decay step.
0.95, # Decay rate.
staircase=True)
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate,
0.9).minimize(loss,
global_step=batch)
# Predictions for the minibatch, validation set and test set.
train_prediction = tf.nn.softmax(logits)
# We'll compute them only once in a while by calling their {eval()} method.
validation_prediction = tf.nn.softmax(model(validation_data_node))
test_prediction = tf.nn.softmax(model(test_data_node))
print('Done')
"""
Explanation: Having defined the basic structure of the graph, we're ready to stamp out multiple copies for training, testing, and validation.
Here, we'll do some customizations depending on which graph we're constructing. train_prediction holds the training graph, for which we use cross-entropy loss and weight regularization. We'll adjust the learning rate during training -- that's handled by the exponential_decay operation, which is itself an argument to the MomentumOptimizer that performs the actual training.
The validation and test prediction graphs are much simpler to generate -- we need only create copies of the model with the validation and test inputs and a softmax classifier as the output.
End of explanation
"""
# Create a new interactive session that we'll use in
# subsequent code cells.
s = tf.InteractiveSession()
# Use our newly created session as the default for
# subsequent operations.
s.as_default()
# Initialize all the variables we defined above.
tf.global_variables_initializer().run()
"""
Explanation: Training and visualizing results
Now that we have the training, test, and validation graphs, we're ready to actually go through the training loop and periodically evaluate loss and error.
All of these operations take place in the context of a session. In Python, we'd write something like:
with tf.Session() as s:
...training / test / evaluation loop...
But, here, we'll want to keep the session open so we can poke at values as we work out the details of training. The TensorFlow API includes a function for this, InteractiveSession.
We'll start by creating a session and initializing the variables we defined above.
End of explanation
"""
BATCH_SIZE = 60
# Grab the first BATCH_SIZE examples and labels.
batch_data = train_data[:BATCH_SIZE, :, :, :]
batch_labels = train_labels[:BATCH_SIZE]
# This dictionary maps the batch data (as a numpy array) to the
# node in the graph it should be fed to.
feed_dict = {train_data_node: batch_data,
train_labels_node: batch_labels}
# Run the graph and fetch some of the nodes.
_, l, lr, predictions = s.run(
[optimizer, loss, learning_rate, train_prediction],
feed_dict=feed_dict)
print('Done')
"""
Explanation: Now we're ready to perform operations on the graph. Let's start with one round of training. We're going to organize our training steps into batches for efficiency; i.e., training using a small set of examples at each step rather than a single example.
End of explanation
"""
print(predictions[0])
"""
Explanation: Let's take a look at the predictions. How did we do? Recall that the output will be probabilities over the possible classes, so let's look at those probabilities.
End of explanation
"""
# The highest probability in the first entry.
print('First prediction', numpy.argmax(predictions[0]))
# But, predictions is actually a list of BATCH_SIZE probability vectors.
print(predictions.shape)
# So, we'll take the highest probability for each vector.
print('All predictions', numpy.argmax(predictions, 1))
"""
Explanation: As expected without training, the predictions are all noise. Let's write a scoring function that picks the class with the maximum probability and compares with the example's label. We'll start by converting the probability vectors returned by the softmax into predictions we can match against the labels.
End of explanation
"""
print('Batch labels', numpy.argmax(batch_labels, 1))
"""
Explanation: Next, we can do the same thing for our labels -- using argmax to convert our 1-hot encoding into a digit class.
End of explanation
"""
correct = numpy.sum(numpy.argmax(predictions, 1) == numpy.argmax(batch_labels, 1))
total = predictions.shape[0]
print(float(correct) / float(total))
confusions = numpy.zeros([10, 10], numpy.float32)
bundled = zip(numpy.argmax(predictions, 1), numpy.argmax(batch_labels, 1))
for predicted, actual in bundled:
confusions[predicted, actual] += 1
plt.grid(False)
plt.xticks(numpy.arange(NUM_LABELS))
plt.yticks(numpy.arange(NUM_LABELS))
plt.imshow(confusions, cmap=plt.cm.jet, interpolation='nearest');
"""
Explanation: Now we can compare the predicted and label classes to compute the error rate and confusion matrix for this batch.
End of explanation
"""
def error_rate(predictions, labels):
"""Return the error rate and confusions."""
correct = numpy.sum(numpy.argmax(predictions, 1) == numpy.argmax(labels, 1))
total = predictions.shape[0]
error = 100.0 - (100 * float(correct) / float(total))
confusions = numpy.zeros([10, 10], numpy.float32)
bundled = zip(numpy.argmax(predictions, 1), numpy.argmax(labels, 1))
for predicted, actual in bundled:
confusions[predicted, actual] += 1
return error, confusions
print('Done')
"""
Explanation: Now let's wrap this up into our scoring function.
End of explanation
"""
# Train for one epoch (one full pass over our training set).
steps = train_size // BATCH_SIZE
for step in range(steps):
# Compute the offset of the current minibatch in the data.
# Note that we could use better randomization across epochs.
offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
batch_data = train_data[offset:(offset + BATCH_SIZE), :, :, :]
batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
# This dictionary maps the batch data (as a numpy array) to the
# node in the graph it should be fed to.
feed_dict = {train_data_node: batch_data,
train_labels_node: batch_labels}
# Run the graph and fetch some of the nodes.
_, l, lr, predictions = s.run(
[optimizer, loss, learning_rate, train_prediction],
feed_dict=feed_dict)
# Print out the loss periodically.
if step % 100 == 0:
error, _ = error_rate(predictions, batch_labels)
print('Step %d of %d' % (step, steps))
print('Mini-batch loss: %.5f Error: %.5f Learning rate: %.5f' % (l, error, lr))
print('Validation error: %.1f%%' % error_rate(
validation_prediction.eval(), validation_labels)[0])
"""
Explanation: We'll need to train for some time to actually see useful predicted values. Let's define a loop that will go through our data. We'll print the loss and error periodically.
Here, we want to iterate over the entire data set rather than just the first batch, so we'll need to slice the data to that end.
(One pass through our training set will take some time on a CPU, so be patient if you are executing this notebook.)
End of explanation
"""
test_error, confusions = error_rate(test_prediction.eval(), test_labels)
print('Test error: %.1f%%' % test_error)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.grid(False)
plt.xticks(numpy.arange(NUM_LABELS))
plt.yticks(numpy.arange(NUM_LABELS))
plt.imshow(confusions, cmap=plt.cm.jet, interpolation='nearest');
for i, cas in enumerate(confusions):
for j, count in enumerate(cas):
if count > 0:
xoff = .07 * len(str(count))
plt.text(j-xoff, i+.2, int(count), fontsize=9, color='white')
"""
Explanation: The error seems to have gone down. Let's evaluate the results using the test set.
To help identify rare mispredictions, we'll include the raw count of each (prediction, label) pair in the confusion matrix.
End of explanation
"""
plt.xticks(numpy.arange(NUM_LABELS))
plt.hist(numpy.argmax(test_labels, 1));
"""
Explanation: We can see here that we're mostly accurate, with some errors you might expect, e.g., '9' is often confused with '4'.
Let's do another sanity check to make sure this matches roughly the distribution of our test set, e.g., it seems like we have fewer '5' values.
End of explanation
"""
|
OSGeo-live/CesiumWidget | GSOC/notebooks/Projects/CESIUM/CesiumWidget Example.ipynb | apache-2.0 | from CesiumWidget import CesiumWidget
from IPython import display
from czml_example import simple_czml, complex_czml
"""
Explanation: Cesium Widget Example
This is an example notebook to show how to bind Cesium.js with the IPython interactive widget system.
End of explanation
"""
cesiumExample = CesiumWidget(width="100%",czml=simple_czml, enable_lighting=True)
"""
Explanation: The code:
from czml_example import simple_czml, complex_czml
Simply import some CZML data for the viewer to display.
Create widget object
End of explanation
"""
#cesiumExample
"""
Explanation: Display the widget:
End of explanation
"""
#cesiumExample.czml = simple_czml
"""
Explanation: Add some data to the viewer
A simple czml
End of explanation
"""
#cesiumExample.czml = complex_czml
"""
Explanation: A more complex CZML example
End of explanation
"""
from __future__ import print_function
from ipywidgets import interact, interactive, fixed
from ipywidgets import widgets
"""
Explanation: Now let's make some interactive widgets:
End of explanation
"""
myczml = {'simple_czml':simple_czml, 'complex_czml':complex_czml}
myplace = {'Eboli, IT':'', 'Woods Hole, MA':'', 'Durham, NH':''}
import geocoder
import time
for i in myplace.keys():
g = geocoder.google(i)
print(g.latlng)
myplace[i]=g.latlng
myplace
def f(CZML):
cesiumExample.czml = myczml[CZML]
def z(Location,z=(0,20000000)):
cesiumExample.zoom_to(myplace[Location][1],myplace[Location][0],z)
interact(f, CZML=('simple_czml','complex_czml')), interact(z, Location=('Eboli, IT','Woods Hole, MA','Durham, NH'));
cesiumExample
"""
Explanation: store the CZML objects in a dictionary and use their names as keys
define a function to switch between the CZML documents
bind the IPython interact class to the function
End of explanation
"""
|
jorisroovers/machinelearning-playground | datascience/NumPy.ipynb | apache-2.0 | # This is a regular python list
range(1,4)
# If you multiply or add to it, it extends the list
a = range(1, 10)
a * 2
a = range(1,11)
a + [ 11 ]
# Compare this to np.array:
import numpy as np
np.array(range(1,10))
# Multiplication is defined as multiplying each element in the array
a = np.array(range(1, 10))
a * 2
a + 5 # Adding to it works as well, this just adds 5 to each element (note that this operation is undefined in regular python)
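# A rough, optional illustration of vectorization (added; timings are indicative
# and machine-dependent; `big` is just a throwaway demo variable): summing a
# million numbers with a Python-level loop vs. np.sum.
big = np.arange(1000000)
%timeit sum(big)     # iterates element by element at the Python level
%timeit np.sum(big)  # runs as a single vectorized loop in C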
"""
Explanation: NumPy introduction
NumPy provides low-level and fast features to manipulate arrays of data (main implementation is in C).
While it has some relatively advanced features like linear algebraic calculations and more, in many cases Pandas provides a more convenient high level interface to do the same things (and even more).
If you just want a quick overview, the following cheatsheet provides one:
Numpy Arrays
The basic building block of numpy is the array which has a number of operations defined on it. Because of this, you don't need to write for loops to manipulate them. This is often called vectorization.
End of explanation
"""
np.array([[1,2],[3,4],[5,6]])
a = np.array([[1,2],[3,4],[5,6]])
a.shape, a.dtype, a.size, a.ndim # shape -> dimension sizes, dtype -> datatype, size -> total number of elems, ndim -> number of dimensions
# You can use comma-separated indexing like so:
a[1,1] # same as a[1][1]
# Note that 1,1 is really a tuple (the parenthesis are just ommited), so this works too:
indices = (1,1)
a[indices]
# Note that regular python doesn't support this
mylist = [[1,2],[3,4]]
# mylist[1,1] # error!
# As always, use ? to get details
a?
"""
Explanation: ndarray is actually a multi-dimensional array
End of explanation
"""
np.zeros(5)
np.ones(10)
np.empty(7) # Empty returns uninitialized garbage values (not zeroes!)
np.identity(5) # identity array
np.arange(11) # same as np.array(range(11))
np.array(range(11))
"""
Explanation: Generating arrays
Numpy has a number of convenience functions to initialize arrays with zeros, ones or empty values.
Others are identity for the identity array, and arange, which is the equivalent of python's range.
End of explanation
"""
np.array([1,2,3], dtype='float64')
# Show all available types
np.sctypes
# Consider strings
a = np.array(['12', '999', '432536'])
a.dtype
"""
Explanation: Datatypes
Each np.array has a datatype associated with it. You can also cast between types.
End of explanation
"""
np.array(['123', '21345312312'])
# You can also cast between types
a.astype(np.int32) # This copies the data into a new array, it does not change the array itself!
"""
Explanation: The datatype S6 stands for a fixed string of length 6, because the longest string in the array ('432536') is of length 6. Compare this to:
End of explanation
"""
a = np.array(range(10, 20))
a[3:]
a[4:6]
"""
Explanation: Slicing
Index manipulation (slicing)with np.arrays is actually pretty similar to how it works with regular python lists
End of explanation
"""
a[3:6] = 33
a
"""
Explanation: However, slices in Numpy are actually views on the original np.array which means that if you manipulate them,
the array changes as well.
End of explanation
"""
b = range(1, 10)
# b[2:7] = 10 # this will raise an error
# Copies need to be explicit in numpy
b = a[3:6].copy()
b[:] = 22 # change all values to 22
b, a # print b and a, see that a is not modified
# You can also slice multi-dimensionally
c = np.array([[1,2,3], [4,5,6], [7,8,9]])
c[1:,:1] # Only keep the last 2 arrays, and from them, only keep up the first elements
# Note how this is different from using c[1:][:1]
# This is really doing 2 operations: first slice to keep the last 2 arrays.
# This returns a new array: array([[4, 5, 6],[7, 8, 9]])
# Then from this new array, return the first element.
c[1:][:1]
"""
Explanation: Compare this to regular python:
End of explanation
"""
# A boolean mask is just a boolean array
mask = np.array([ True, False, True ])
mask
# To apply the mask against a target, just pass it like an index.
# The result is an array with the elements from 'target' that had True on their corresponding index in 'mask'.
target = np.array([7,8,9])
target[mask]
# This works for multi-dimensional arrays too, but the result will obviously be a single dimensional array
# Also, you need to make sure that the dimensions of your target and mask arrays match
target2 = np.array([['a','b','c'], ['d','e','f'],['g','h','i']])
mask2 = np.array([[False,True,False], [True, True, False], [True, False, True]])
target2[mask2]
"""
Explanation: This picture explains NumPy's array slicing pretty well.
Boolean indexing
There are 2 parts to boolean indexing:
1. Apply a boolean mask to an np.array. Boolean masks are just arrays of booleans:
[True, False, True]
2. Creating boolean masks using boolean conditions
Applying a boolean mask
End of explanation
"""
numbers = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
numbers > 5
numbers % 2 == 0 # Even numbers
"""
Explanation: Creating a boolean mask
The easiest way to create a boolean mask is to just create an array with booleans in it. However, you can also create boolean masks by applying a boolean expression to an existing array.
End of explanation
"""
names = np.array(["John", "Mary", "Joe", "Jane", "Marc", "Jorge", "Adele" ])
names == "Joe"
"""
Explanation: Strings work too!
End of explanation
"""
(names == "Joe") | (names == "Mary")
"""
Explanation: You can combine filters using the boolean arithmetic operations | and &. Note that you have to put the individual boolean expressions between parentheses at this point.
End of explanation
"""
names[names == "Joe"], numbers[numbers > 5]
"""
Explanation: Once you have boolean mask, you can apply it to an array of the same length as a boolean mask. This is often useful if you want to select certain values in an array like so:
End of explanation
"""
numbers = np.array([-1, -9, 18.2, 3, 4.3, 0, 5.3, -12.2])
numbers
np.sum(numbers), np.mean(numbers)
np.square(numbers)
np.abs(numbers)
np.sqrt(np.abs(numbers)) # Can't take sqrt of negative number, so let's get the abs values first
np.max(numbers), np.min(numbers)
np.ceil(numbers), np.floor(numbers)
"""
Explanation: Universal functions
A universal function, or ufunc, is a function that performs elementwise operations on data in ndarrays. You can think of them as fast vectorized wrappers for simple functions that take one or more scalar values and produce one or more scalar results.
End of explanation
"""
np.greater(numbers, 3)
# combining with boolean arithmetic
np.logical_or(np.less_equal(numbers, 4), np.greater(numbers, 0))
np.sort(numbers)
np.unique(np.array([1, 2, 4, 2, 5, 1]))
"""
Explanation: The boolean expressions that create boolean masks (see prev section) can also be expressed explicitly
End of explanation
"""
numbers.sum(), numbers.mean(), numbers.min(), numbers.max()
"""
Explanation: Some of these operations are also directly available on the array
End of explanation
"""
np.save("/tmp/myarray", np.arange(10))
# The .npy extension is automatically added
!cat /tmp/myarray.npy
np.load("/tmp/myarray.npy") # You DO need to specify the .npy extension when loading
"""
Explanation: File IO
You can easily store/retrieve numpy arrays from files.
End of explanation
"""
np.savez("/tmp/myarray2", a=np.arange(2000))
np.load("/tmp/myarray2.npz")['a'] # Loading from a npz file is lazy, you need to specify which array to load
"""
Explanation: You can also save/load as a zip file using savez and load.
End of explanation
"""
!echo "1,2,3,4" > /tmp/numpytxtsample.txt
!cat /tmp/numpytxtsample.txt
np.loadtxt("/tmp/numpytxtsample.txt", delimiter=",")
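# Added example: the counterpart for writing text files is np.savetxt
# (the output path here is just an arbitrary temporary file).
np.savetxt("/tmp/numpytxtsample_out.txt", np.array([[1, 2], [3, 4]]), delimiter=",")
!cat /tmp/numpytxtsample_out.txt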
"""
Explanation: You can also load other file formats using loadtxt.
End of explanation
"""
x = np.array([[1,2,3],[4,5,6], [7,8,9]])
y = np.array([[9,8,7],[6,5,4],[3,2,1]])
x,y
# Matrix multiplication
np.dot(x,y) # same as: x.dot(y)
# The numpy.linalg package has a bunch of extra linear algebra functions
# For example, the determinant (https://en.wikipedia.org/wiki/Determinant)
from numpy.linalg import det
det(x)
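# Added example: numpy.linalg can also solve linear systems Ax = b directly
# (A and b below are just example values for the demo).
from numpy.linalg import solve
A = np.array([[3., 1.], [1., 2.]])
b = np.array([9., 8.])
solve(A, b) # array([ 2., 3.]) since 3*2 + 1*3 = 9 and 1*2 + 2*3 = 8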
"""
Explanation: Linear Algebra
Numpy also supports linear algebra, e.g.: matrix multiplication, determinants, etc
End of explanation
"""
|
yashdeeph709/Algorithms | PythonBootCamp/Complete-Python-Bootcamp-master/Print Formatting.ipynb | apache-2.0 | print 'This is a string'
"""
Explanation: Print Formatting
In this lecture we will briefly cover the various ways to format your print statements. As you code more and more, you will probably want to have print statements that can take in a variable into a printed string statement.
The most basic example of a print statement is:
End of explanation
"""
s = 'STRING'
print 'Place another string with a mod and s: %s' %(s)
"""
Explanation: Strings
You can use the %s to format strings into your print statements.
End of explanation
"""
print 'Floating point numbers: %1.2f' %(13.144)
print 'Floating point numbers: %1.0f' %(13.144)
print 'Floating point numbers: %1.5f' %(13.144)
print 'Floating point numbers: %10.2f' %(13.144)
print 'Floating point numbers: %25.2f' %(13.144)
"""
Explanation: Floating Point Numbers
Floating point numbers use the format %n1.n2f, where n1 is the total minimum number of digits the string should contain (these may be filled with whitespace if the entire number does not have this many digits). The n2 placeholder stands for how many digits to show past the decimal point. Let's see some examples:
End of explanation
"""
print 'Here is a number: %s. Here is a string: %s' %(123.1,'hi')
print 'Here is a number: %r. Here is a string: %r' %(123.1,'hi')
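# Added example: the underlying difference between str() and repr() -- repr()
# gives an unambiguous representation (note the quotes around the string).
str('hi'), repr('hi')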
"""
Explanation: Conversion Format methods.
It should be noted that two methods %s and %r actually convert any python object to a string using two separate methods: str() and repr(). We will learn more about these functions later on in the course, but you should note you can actually pass almost any Python object with these two methods and it will work:
End of explanation
"""
print 'First: %s, Second: %1.2f, Third: %r' %('hi!',3.14,22)
"""
Explanation: Multiple Formatting
Pass a tuple to the modulo symbol to place multiple formats in your print statements:
End of explanation
"""
print 'This is a string with an {p}'.format(p='insert')
# Multiple times:
print 'One: {p}, Two: {p}, Three: {p}'.format(p='Hi!')
# Several Objects:
print 'Object 1: {a}, Object 2: {b}, Object 3: {c}'.format(a=1,b='two',c=12.3)
"""
Explanation: Using the string .format() method
The best way to format objects into your strings for print statements is using the format method. The syntax is:
'String here {var1} then also {var2}'.format(var1='something1',var2='something2')
Lets see some examples:
End of explanation
"""
|
fantasycheng/udacity-deep-learning-project | tutorials/intro-to-tflearn/TFLearn_Digit_Recognition_Solution.ipynb | mit | # Import Numpy, TensorFlow, TFLearn, and MNIST data
import numpy as np
import tensorflow as tf
import tflearn
import tflearn.datasets.mnist as mnist
"""
Explanation: Handwritten Number Recognition with TFLearn and MNIST
In this notebook, we'll be building a neural network that recognizes handwritten numbers 0-9.
This kind of neural network is used in a variety of real-world applications including: recognizing phone numbers and sorting postal mail by address. To build the network, we'll be using the MNIST data set, which consists of images of handwritten numbers and their correct labels 0-9.
We'll be using TFLearn, a high-level library built on top of TensorFlow to build the neural network. We'll start off by importing all the modules we'll need, then load the data, and finally build the network.
End of explanation
"""
# Retrieve the training and test data
trainX, trainY, testX, testY = mnist.load_data(one_hot=True)
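# Optional sanity check (added): the images come flattened to 784-long vectors
# and the labels as one-hot rows, as described in the accompanying text.
print(trainX.shape)  # expected (55000, 784)
print(trainY.shape)  # expected (55000, 10) -- one-hot labels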
"""
Explanation: Retrieving training and test data
The MNIST data set already contains both training and test data. There are 55,000 data points of training data, and 10,000 points of test data.
Each MNIST data point has:
1. an image of a handwritten digit and
2. a corresponding label (a number 0-9 that identifies the image)
We'll call the images, which will be the input to our neural network, X and their corresponding labels Y.
We're going to want our labels as one-hot vectors, which are vectors that hold mostly 0's and one 1. It's easiest to see this in an example. As a one-hot vector, the number 0 is represented as [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], and 4 is represented as [0, 0, 0, 0, 1, 0, 0, 0, 0, 0].
Flattened data
For this example, we'll be using flattened data or a representation of MNIST images in one dimension rather than two. So, each handwritten number image, which is 28x28 pixels, will be represented as a one dimensional array of 784 pixel values.
Flattening the data throws away information about the 2D structure of the image, but it simplifies our data so that all of the training data can be contained in one array whose shape is [55000, 784]; the first dimension is the number of training images and the second dimension is the number of pixels in each image. This is the kind of data that is easy to analyze using a simple neural network.
End of explanation
"""
# Visualizing the data
import matplotlib.pyplot as plt
%matplotlib inline
# Function for displaying a training image by it's index in the MNIST set
def display_digit(index):
label = trainY[index].argmax(axis=0)
# Reshape 784 array into 28x28 image
image = trainX[index].reshape([28,28])
plt.title('Training data, index: %d, Label: %d' % (index, label))
plt.imshow(image, cmap='gray_r')
plt.show()
# Display the first (index 0) training image
display_digit(0)
"""
Explanation: Visualize the training data
Provided below is a function that will help you visualize the MNIST data. By passing in the index of a training example, the function display_digit will display that training image along with its corresponding label in the title.
End of explanation
"""
# Define the neural network
def build_model():
# This resets all parameters and variables, leave this here
tf.reset_default_graph()
# Inputs
net = tflearn.input_data([None, trainX.shape[1]])
# Hidden layer(s)
net = tflearn.fully_connected(net, 128, activation='ReLU')
net = tflearn.fully_connected(net, 32, activation='ReLU')
# Output layer and training model
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.01, loss='categorical_crossentropy')
model = tflearn.DNN(net)
return model
# Build the model
model = build_model()
"""
Explanation: Building the network
TFLearn lets you build the network by defining the layers in that network.
For this example, you'll define:
The input layer, which tells the network the number of inputs it should expect for each piece of MNIST data.
Hidden layers, which recognize patterns in data and connect the input to the output layer, and
The output layer, which defines how the network learns and outputs a label for a given image.
Let's start with the input layer; to define the input layer, you'll define the type of data that the network expects. For example,
net = tflearn.input_data([None, 100])
would create a network with 100 inputs. The number of inputs to your network needs to match the size of your data. For this example, we're using 784 element long vectors to encode our input data, so we need 784 input units.
Adding layers
To add new hidden layers, you use
net = tflearn.fully_connected(net, n_units, activation='ReLU')
This adds a fully connected layer where every unit (or node) in the previous layer is connected to every unit in this layer. The first argument net is the network you created in the tflearn.input_data call; it designates the input to the hidden layer. You can set the number of units in the layer with n_units, and set the activation function with the activation keyword. You can keep adding layers to your network by repeatedly calling tflearn.fully_connected(net, n_units).
Then, to set how you train the network, use:
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')
Again, this is passing in the network you've been building. The keywords:
optimizer sets the training method, here stochastic gradient descent
learning_rate is the learning rate
loss determines how the network error is calculated. In this example, with categorical cross-entropy.
Finally, you put all this together to create the model with tflearn.DNN(net).
End of explanation
"""
# Training
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=100, n_epoch=100)
"""
Explanation: Training the network
Now that we've constructed the network, saved as the variable model, we can fit it to the data. Here we use the model.fit method. You pass in the training features trainX and the training targets trainY. Below I set validation_set=0.1 which reserves 10% of the data set as the validation set. You can also set the batch size and number of epochs with the batch_size and n_epoch keywords, respectively.
Too few epochs don't effectively train your network, and too many take a long time to execute. Choose wisely!
End of explanation
"""
# Compare the labels that our model predicts with the actual labels
# Find the indices of the most confident prediction for each item. That tells us the predicted digit for that sample.
predictions = np.array(model.predict(testX)).argmax(axis=1)
# Calculate the accuracy, which is the percentage of times the predicted labels matched the actual labels
actual = testY.argmax(axis=1)
test_accuracy = np.mean(predictions == actual, axis=0)
# Print out the result
print("Test accuracy: ", test_accuracy)
"""
Explanation: Testing
After you're satisfied with the training output and accuracy, you can then run the network on the test data set to measure its performance! Remember, only do this after you've done the training and are satisfied with the results.
A good result will be higher than 95% accuracy. Some simple models have been known to get up to 99.7% accuracy!
End of explanation
"""
|
nilmtk/nilmtk | docs/manual/user_guide/disaggregation_and_metrics.ipynb | apache-2.0 | from __future__ import print_function, division
import time
from matplotlib import rcParams
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from six import iteritems
from nilmtk import DataSet, TimeFrame, MeterGroup, HDFDataStore
from nilmtk.legacy.disaggregate import CombinatorialOptimisation, FHMM
import nilmtk.utils
%matplotlib inline
rcParams['figure.figsize'] = (13, 6)
"""
Explanation: Disaggregation
End of explanation
"""
train = DataSet('/data/redd.h5')
test = DataSet('/data/redd.h5')
"""
Explanation: Dividing data into train and test set
End of explanation
"""
building = 1
"""
Explanation: Let us use building 1 for demo purposes
End of explanation
"""
train.set_window(end="2011-04-30")
test.set_window(start="2011-04-30")
train_elec = train.buildings[1].elec
test_elec = test.buildings[1].elec
"""
Explanation: Let's split data at April 30th
End of explanation
"""
train_elec.plot()
test_elec.mains().plot()
"""
Explanation: Visualizing the data
End of explanation
"""
fridge_meter = train_elec['fridge']
fridge_df = next(fridge_meter.load())
fridge_df.head()
mains = train_elec.mains()
mains_df = next(mains.load())
mains_df.head()
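# An additional, optional check (added): the median gap between consecutive
# timestamps of the dataframes loaded above gives the effective sample period.
print('Fridge sample period (s):', fridge_df.index.to_series().diff().median().total_seconds())
print('Mains sample period (s):', mains_df.index.to_series().diff().median().total_seconds())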
"""
Explanation: The REDD data set has appliance-level data sampled every 3 or 4 seconds and mains data sampled every second. Let us verify this.
End of explanation
"""
top_5_train_elec = train_elec.submeters().select_top_k(k=5)
top_5_train_elec
"""
Explanation: Since both of these are sampled at different frequencies, we will downsample both to a common resolution (the sample_period set below). We will also select the top-5 appliances in terms of energy consumption and use them for training our FHMM and CO models.
Selecting top-5 appliances
End of explanation
"""
def predict(clf, test_elec, sample_period, timezone):
pred = {}
gt= {}
# "ac_type" varies according to the dataset used.
# Make sure to use the correct ac_type before using the default parameters in this code.
for i, chunk in enumerate(test_elec.mains().load(physical_quantity = 'power', ac_type = 'apparent', sample_period=sample_period)):
chunk_drop_na = chunk.dropna()
pred[i] = clf.disaggregate_chunk(chunk_drop_na)
gt[i]={}
for meter in test_elec.submeters().meters:
# Only use the meters that we trained on (this saves time!)
gt[i][meter] = next(meter.load(physical_quantity = 'power', ac_type = 'active', sample_period=sample_period))
gt[i] = pd.DataFrame({k:v.squeeze() for k,v in iteritems(gt[i]) if len(v)}, index=next(iter(gt[i].values())).index).dropna()
# If everything can fit in memory
gt_overall = pd.concat(gt)
gt_overall.index = gt_overall.index.droplevel()
pred_overall = pd.concat(pred)
pred_overall.index = pred_overall.index.droplevel()
# Having the same order of columns
gt_overall = gt_overall[pred_overall.columns]
#Intersection of index
gt_index_utc = gt_overall.index.tz_convert("UTC")
pred_index_utc = pred_overall.index.tz_convert("UTC")
common_index_utc = gt_index_utc.intersection(pred_index_utc)
common_index_local = common_index_utc.tz_convert(timezone)
gt_overall = gt_overall.loc[common_index_local]
pred_overall = pred_overall.loc[common_index_local]
appliance_labels = [m for m in gt_overall.columns.values]
gt_overall.columns = appliance_labels
pred_overall.columns = appliance_labels
return gt_overall, pred_overall
"""
Explanation: Training and disaggregation
A function to disaggregate the mains data into its constituent appliances and return the predictions
End of explanation
"""
classifiers = {'CO':CombinatorialOptimisation(), 'FHMM':FHMM()}
predictions = {}
sample_period = 120
for clf_name, clf in classifiers.items():
print("*"*20)
print(clf_name)
print("*" *20)
start = time.time()
# Note that we pass sample_period to downsample the data (here 120 seconds).
# If instead of top_5 we wanted to train on all appliances, we would write
# clf.train(train_elec, sample_period=sample_period)
clf.train(top_5_train_elec, sample_period=sample_period)
end = time.time()
print("Runtime =", end-start, "seconds.")
gt, predictions[clf_name] = predict(clf, test_elec, sample_period, train.metadata['timezone'])
"""
Explanation: Train using 2 benchmarking algorithms - Combinatorial Optimisation (CO) and Factorial Hidden Markov Model (FHMM)
End of explanation
"""
appliance_labels = [m.label() for m in gt.columns.values]
gt.columns = appliance_labels
predictions['CO'].columns = appliance_labels
predictions['FHMM'].columns = appliance_labels
"""
Explanation: Using prettier labels!
End of explanation
"""
gt.head()
predictions['CO'].head()
predictions['FHMM'].head()
"""
Explanation: Taking a look at the ground truth of top 5 appliance power consumption
End of explanation
"""
predictions['CO']['Fridge'].head(300).plot(label="Pred")
gt['Fridge'].head(300).plot(label="GT")
plt.legend()
predictions['FHMM']['Fridge'].head(300).plot(label="Pred")
gt['Fridge'].head(300).plot(label="GT")
plt.legend()
"""
Explanation: Plotting the predictions against the actual usage
End of explanation
"""
? nilmtk.utils.compute_rmse
rmse = {}
for clf_name in classifiers.keys():
rmse[clf_name] = nilmtk.utils.compute_rmse(gt, predictions[clf_name])
rmse = pd.DataFrame(rmse)
rmse
"""
Explanation: Comparing NILM algorithms (CO vs FHMM)
nilmtk.utils.compute_rmse is an extended version of the following, handling both missing values and labels better:
python
def compute_rmse(gt, pred):
from sklearn.metrics import mean_squared_error
rms_error = {}
for appliance in gt.columns:
rms_error[appliance] = np.sqrt(mean_squared_error(gt[appliance], pred[appliance]))
return pd.Series(rms_error)
End of explanation
"""
|
mdeff/ntds_2017 | projects/reports/wikipedia_hyperlink/ntds_project.ipynb | mit | import numpy as np
import seaborn as sns
import networkx as nx
import matplotlib.pyplot as plt
import operator
import community
import plotly
import plotly.graph_objs as go
import plotly.plotly as py
from networkx.drawing.nx_agraph import graphviz_layout
from scipy import linalg, cluster, sparse
from tqdm import tqdm_notebook
from utils import load_obj, save_obj
%matplotlib inline
%load_ext autoreload
%autoreload 2
"""
Explanation: Community detection on the Wikipedia hyperlink graph
Team members:
* Armand Boschin
* Bojana Ranković
* Quentin Rebjock
In order to read this properly (especially the graph visualization in the end), you might want to use this link to NBViewer.
Please go through the README.md file beforehand.
NB:
* Please take the time to download the pickle file shortest_paths.pkl from this link. It should be put in the directory data/.
* Some cells are not executed and are in comments because they are really long. All the output files are directly dumped into pickle files. Those should be present in the directory data/.
- scraping step ($\sim 3$ hours) : network.pkl
- shortest-paths step ($\sim 5$ hours) : shortest_paths.pkl
- cross-validation step ($\sim 1$ hour) : cross_val.pkl
This is a reminder of the project proposal adapted to the reality of the project:
Graph: Wikipedia hyperlink network
Problem: Does the structure of the graph bear information about the content of the nodes? We would like to find out if it is possible to detect communities of pages just by looking at the hyperlink connections and match these communities with real-world data such as the categories of the pages. Is spectral clustering a viable approach compared to proven methods of community detection?
Steps of the project:
* Scraping the Wikipedia hyperlink network. Start from one node and get the pages as far as 2 or 3 hops depending on the number of nodes we get.
* Model the network by a random graph/scale-free network/something else in order to try to retrieve some of its characteristics.
* Apply the Louvain algorithm for community detection to get a baseline to compare spectral clustering to. Indeed, in terms of community detection, Louvain is a standard method, and our approach is to test the performance of spectral clustering against it.
* Try to apply spectral clustering in order to detect communities of pages.
* Visualize the clusters to match them with real-world categories (using some of the tools from the last guest lecture).
Table of contents
1) Data Acquisition
<br><br>
2) Data Exploration
<br><br>
3) Data Exploitation
3.1) Modelisation of the network
3.1.1) Exploration of the degree distribution
3.1.2) Modelisation with usual graphs models
3.1.3) A power law network with the right exponent
3.1.4 Comparison of the models
3.1.5) Comparison with common networks from the web <br><br>
3.2) Community detection using Spectral clustering
3.2.1) Louvain community detection
3.2.2) Spectral Clustering
3.2.3 Comparison of the two methods
3.2.4 Visualization
<br><br>
4) Conclusion
End of explanation
"""
from utils import explore_page
root_node = 'Jaguar (disambiguation)'
network = {} # This dict stores for each page a dictionary containing the keys [url, links, categories]
first_nodes = []
explore_page(root_node, network, first_nodes)
second_nodes = []
for node in first_nodes:
explore_page(node, network, second_nodes)
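# NOTE (added sketch): the real explore_page lives in utils.py and is not shown
# in this notebook. The function below is only a hypothetical illustration of
# the idea -- fetch a page, store its url/links/categories, extend the frontier --
# written against the third-party `wikipedia` package, not the project's own code
# (the real function also takes inner/all_nodes arguments not reproduced here).
def explore_page_sketch(title, network, frontier):
    import wikipedia  # pip install wikipedia
    try:
        page = wikipedia.page(title)
    except Exception:
        return  # skip titles that cannot be resolved
    network[title] = {'url': page.url,
                      'links': page.links,
                      'categories': page.categories}
    frontier.extend(page.links)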
"""
Explanation: 1) Data Acquisition <a class="anchor" id="Data Acquisition"></a>
We want to acquire a sub-network of the Wikipedia hyperlink network. In such a graph, each node is a Wikipedia page and there is a link between node $a$ and node $b$ if there is a link to page $b$ on page $a$. This is a directed network but we will make it undirected later on.
The process of the acquisition is the following :
* Start from an arbitrary root page (prefer an ambiguous page in order to get as many different communities as possible).
* Explore this page to get the intra-wiki links : the first nodes.
* For each first node, explore the intra-wiki links to get the second nodes.
* Look for inner connections (links from second nodes to first nodes, typically).
* Finally, for each node, take the intersection of its neighbors with the collected nodes (some nodes may have neighbors that were not collected, for example a second node reached from a disambiguation page).
We use the Wikipedia API, which allows us to scrape pages and get links and categories for each one. We chose to include in our network only real pages (not the disambiguation ones). Disambiguation pages are nevertheless useful during the scraping because they allow us to get a larger sample of the real graph: they act like bridges between pages that have nothing to do with each other.
For each node we need to get URL, title, categories and links to other pages.
We use as root_node the disambiguation page Jaguar (disambiguation) as it lists a really wide variety of themes (animals, cars, music, films, weapons...). It can lead us to a large selection of categories.
The function explore_page is implemented in the file utils.py. All implementation details are provided there.
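For intuition, here is a minimal sketch of what such an exploration step could look like, using the wikipedia Python package. This is purely illustrative: the actual explore_page in utils.py may be implemented quite differently, and the helper name explore_page_sketch is hypothetical.
```python
import wikipedia

def explore_page_sketch(title, network, frontier, inner=False, all_nodes=None):
    # Hypothetical sketch: fetch a page, store its links/categories, extend the frontier.
    try:
        page = wikipedia.page(title, auto_suggest=False)
    except Exception:
        return  # skip missing or ambiguous pages in this simplified version
    links = page.links
    if inner and all_nodes is not None:
        links = [l for l in links if l in set(all_nodes)]
    network[title] = {'url': page.url, 'links': links, 'categories': page.categories}
    frontier.extend(links)
```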
End of explanation
"""
all_nodes = list(network.keys()) + second_nodes
for link in tqdm_notebook(second_nodes):
explore_page(link, network, [], inner=True, all_nodes=all_nodes)
"""
Explanation: Look for connections between second nodes and the rest of the nodes.
End of explanation
"""
all_nodes = list(network.keys())
for title in tqdm_notebook(network.keys()):
network[title]['links'] = list(set(network[title]['links']).intersection(set(all_nodes)))
"""
Explanation: The above cell took 2 hours and 47 minutes to run (duration of scraping).
Now we need to go through all the nodes in order to remove from their neighbors any page that has not been scraped.
End of explanation
"""
l = list(network.keys())
for i in l:
if len(network[i]['links']) == 0:
del network[i]
"""
Explanation: The previous step can lead to pages with no neighbors (if a second node comes from a disambiguation page and none of its neighbors are among the first or second nodes).
End of explanation
"""
# Prefixes of maintenance/meta categories that carry no topical information
excluded_prefixes = ('Redundant', 'Pages', 'Webarchive', 'Wikipedia', 'Articles',
                     'Coordinates on Wikidata', 'CS1', 'EngvarB', 'All',
                     'Good articles', 'Use dmy')
for title in network.keys():
    cats = network[title]['categories']
    new_cats = [c for c in cats
                if not c.startswith(excluded_prefixes) and 'Wikidata' not in c]
    network[title]['categories'] = new_cats
"""
Explanation: Cleaning the categories
There are some categories for each page that are irrelevant in our work (e.g. "All articles with unsourced statements"). We need to get rid of those.
End of explanation
"""
# save_obj(network, 'network')
network = load_obj('network')
"""
Explanation: Creating pickle files
As the scraping of the network takes quite some time ($\sim$ 3 hours) (especially getting the inner connections), we store the results in pickle files.
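These helpers are presumably thin wrappers around the pickle module, along the following lines (a hypothetical sketch; the actual implementations live in utils.py):
```python
import pickle

def save_obj(obj, name):
    # dump an object to data/<name>.pkl
    with open('data/' + name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)

def load_obj(name):
    # load an object back from data/<name>.pkl
    with open('data/' + name + '.pkl', 'rb') as f:
        return pickle.load(f)
```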
End of explanation
"""
neighbors = {}
for i in network.keys():
neighbors[i] = network[i]['links']
g = nx.Graph(neighbors) # undirected graph
"""
Explanation: Network creation
Let's convert the collected network into a NetworkX instance which is quite handy to manipulate.
Let's make it undirected as well.
End of explanation
"""
print('Total number of nodes : {}'.format(len(g.nodes)))
print('Total number of edges : {}'.format(len(g.edges)))
if nx.is_connected(g):
print('The graph is connected.')
else:
print('The graph is not connected.')
"""
Explanation: 2) Data Exploration <a class="anchor" id="Data Exploration"></a>
In this part of the notebook, we provide some indicators of the data in order to understand what we'll be working on.
Adjacency matrix
Degrees distribution
Average degree
Diameter of the collected network
Visualization of the network
End of explanation
"""
adj = nx.adjacency_matrix(g)
plt.spy(adj.todense())
"""
Explanation: Adjacency Matrix
End of explanation
"""
(adj != adj.T).count_nonzero() == 0
"""
Explanation: Check if it's symmetric :
End of explanation
"""
degrees = np.array(adj.sum(axis=1)).squeeze()
degrees_truncated = degrees[degrees < 700]
fig, ax = plt.subplots(ncols=2, sharey=True, figsize=(15,5))
ax[0].set_title('Degree distribution')
ax[0].hist(degrees, bins=50)
ax[1].set_title('Truncated degree distribution')
ax[1].hist(degrees_truncated, bins=20)
plt.tight_layout()
plt.show()
fig, ax = plt.subplots(ncols=2, sharey=True, figsize=(15,5))
ax[0].set_title('Degree box plot')
sns.boxplot(degrees, ax=ax[0])
ax[1].set_title('Truncated degree box plot')
sns.boxplot(degrees_truncated, ax=ax[1])
plt.tight_layout()
plt.show()
"""
Explanation: Degrees distribution
As there are some clear outliers making the visualization difficult, we can truncate the degrees or just use a box plot.
End of explanation
"""
avg_degree = np.mean(degrees)
print('The average degree of the network is {}.'.format(np.round(avg_degree, 2)))
"""
Explanation: Average degree
End of explanation
"""
# shortest_paths = dict(nx.shortest_path_length(g))
# save_obj(shortest_paths, 'shortest_paths')
"""
Explanation: Diameter
First we compute the shortest path lengths. NetworkX allows us to do the computation and returns a dictionary. This will be useful later on.
End of explanation
"""
shortest_paths = load_obj('shortest_paths')
"""
Explanation: As this computation is quite long ($\sim$ 5 hours), we dumped the resulting dictionary in a pickle file.
End of explanation
"""
nodes = list(network.keys())
distances = np.zeros(shape=(len(nodes), len(nodes)))
for i in range(len(nodes)):
for j in range(len(nodes)):
distances[i, j] = shortest_paths[nodes[i]][nodes[j]]
diameter = np.amax(distances)
print('The diameter of the network is {}.'.format(int(diameter)))
"""
Explanation: Now computing the diameter of the network comes down to finding the largest distance. Let's turn the dictionary into a NumPy array, which is faster to manipulate.
End of explanation
"""
nx.draw(g, node_size=5, figsize=(15, 15))
"""
Explanation: At first sight, if we had scraped only first nodes and then second nodes, we should have obtained a diameter of at most 4, because every node would be at distance at most 2 from the root node.
Here, thanks to the use of disambiguation pages, we manage to get nodes that are further away from the root node, but surprisingly our graph is connected anyway.
Visualization
End of explanation
"""
from utils import get_distribution, linear_regression_coefficient
from utils import plot_degree_distribution, print_distribution, print_denoised_degree_distribution
"""
Explanation: 3) Data Exploitation <a class="anchor" id="Data Exploitation"></a>
3.1) Modelisation of the network <a class="anchor" id="Modelisation of the network"></a>
In this section, we try to model the collected network with a simpler one, aiming to reproduce its main features: the number of nodes, the number of edges, the degree distribution, the shape of the giant component, and so on. Such a model is particularly useful to understand the original structure and to compare it to other famous, well-studied networks.
In this modelisation part, we use functions implemented in utils.py to plot degree distributions and to get the regression coefficient of a power law.
End of explanation
"""
nNodes = len(g.nodes())
nEdges = g.size()
print('The network has {0} nodes and {1} edges.'.format(nNodes, nEdges))
print('The minimum and the maximum degrees are respectively {0} and {1}.'.format(np.min(degrees), np.max(degrees)))
print_distribution(g, a=1000)
"""
Explanation: 3.1.1) Exploration of the degree distribution <a class="anchor" id="Exploration of the degree distribution"></a>
Let's first try to plot various transformations of the degree distribution in order to get a sense of a model that could fit.
End of explanation
"""
linear_regression_coefficient(g, title='Linear regression of the original degree distribution')
"""
Explanation: The previous plots show that the degree distribution of the network is complicated and doesn't exactly fit any of the basic network structures studied during the semester. However, the last log-log plot suggests that a scale-free network with a power law could approximate the distribution. Let's run a regression to see what coefficient would fit. We use the linear_model module from sklearn.
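A power law $p(k) \propto k^{-\gamma}$ is linear in log-log space, so the exponent can be estimated with an ordinary least-squares fit of $\log p(k)$ against $\log k$. The sketch below illustrates the idea; the actual linear_regression_coefficient in utils.py may bin and fit the data differently.
```python
import numpy as np
from sklearn import linear_model

def estimate_power_law_exponent(degrees, n_bins=50):
    # Histogram the degrees, then regress log(count) on log(degree).
    counts, edges = np.histogram(degrees, bins=n_bins)
    centers = (edges[:-1] + edges[1:]) / 2
    mask = counts > 0                       # log is undefined for empty bins
    X = np.log(centers[mask]).reshape(-1, 1)
    y = np.log(counts[mask])
    reg = linear_model.LinearRegression().fit(X, y)
    return -reg.coef_[0], reg.score(X, y)   # exponent gamma and the R^2 of the fit
```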
End of explanation
"""
p = 2 * nEdges / nNodes / (nNodes - 1)
print('The probability hyper-parameter giving the best approximation of the number of edges is {}'.format(np.round(p, 4)))
er = nx.erdos_renyi_graph(nNodes, p)
plot_degree_distribution(er, title='Degree distribution of the Erdős–Rényi graph')
"""
Explanation: The value of $R^2$ is not really close to 1 but a scale free network model does not seem too bad anyway.
We will later use that regression to build an approximation of the network. We make the assumption that the network distribution follows a power law of coefficient -1.0693.
3.1.2) Modelisation with usual graphs models <a class="anchor" id="Modelisation with usual graphs models"></a>
Erdős–Rényi graph
The Erdős–Rényi graph models a random network where each pair of nodes has a fixed probability of being linked. We want this network to have the same number of nodes as the original one, and to approximate its number of edges as closely as possible.
End of explanation
"""
ba = nx.barabasi_albert_graph(nNodes, 54)
print('This Barabási-Albert network has {0} edges while our original network has {1} edges.'.format(ba.size(), nEdges))
plot_degree_distribution(ba, title='Degree distribution of the Barabási-Albert graph')
"""
Explanation: As expected, it clearly doesn't match the distribution of our network. Random networks have a Poisson degree distribution (when the number of nodes is large), which doesn't fit the observed distribution.
Barabási-Albert
The Barabási-Albert graph follows a power law distribution (in theory $p(k) = C \times k^{-3}$), so we can hope for much better results than with the Erdős–Rényi model. The number of nodes that we want in the graph is fixed, and we can only play with the parameter specifying the number of edges to attach from each new node to existing nodes. Using trial and error, we found that setting this parameter to 54 gives the closest number of edges to our original graph.
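The value found by trial and error can also be motivated analytically: a Barabási-Albert graph on $n$ nodes with attachment parameter $m$ has roughly $m \times n$ edges, so $m \approx$ nEdges / nNodes. A quick sanity check (assuming the nNodes and nEdges counts computed above):
```python
# Rough analytic choice of the attachment parameter m (about half the average degree)
m = int(round(nEdges / nNodes))
print('Analytic choice of m:', m)  # should land close to the value 54 found by trial and error
```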
End of explanation
"""
print_denoised_degree_distribution(ba, b=200, d=200)
"""
Explanation: It indeed seems to be a power law distribution. Let's have a deeper insight and try to measure the parameter of this power law. The coefficient of such a random graph should be 3 in theory.
End of explanation
"""
linear_regression_coefficient(ba, limit=300, title='Linear regression of the Barabási-Albert degree distribution')
"""
Explanation: Regression to measure the model law's coefficient
End of explanation
"""
ba_degrees = list(dict(ba.degree()).values())
f, ax = plt.subplots(figsize=(15, 6))
sns.distplot(degrees_truncated, label='Collected network', ax=ax)
sns.distplot(ba_degrees, label='Barabási-Albert network', ax=ax)
plt.legend(loc='upper right')
plt.show()
"""
Explanation: We get a coefficient of 2.7, which is close to the expected value of 3. This network will thus be a better approximation than the random network presented previously, but it is still not ideal: we would like a power-law network whose coefficient is closer to the 1.0693 computed earlier.
Comparison between the collected and the Barabási-Albert network distributions
End of explanation
"""
while True:
    # Iterate the construction of a degree sequence until we find one whose sum is even
# (this is a requirement of the function configuration_model)
s = []
while len(s) < nNodes:
# generate degrees one at a time
nextval = int(nx.utils.powerlaw_sequence(1, 1.6)[0])
if nextval != 0:
s.append(nextval)
if sum(s) % 2 == 0:
# we found a proper distribution, can break!
break
power_law = nx.configuration_model(s)
power_law = nx.Graph(power_law) # remove parallel edges
power_law.remove_edges_from(power_law.selfloop_edges())
print('This power law network has {0} nodes and {1} edges.'.format(len(power_law), power_law.size()))
"""
Explanation: We clearly see here that it is a better approximation than the Erdős–Rényi graph but is still not ideal.
3.1.3) A power law network with the right exponent <a class="anchor" id="A power law network with the right exponent"></a>
In this section we are trying to build a power-law network with an exponent closer to the one measured in the regression of the original network. We didn't find any method to build a graph with the exact exponent, but we approximated it with the following code.
The configuration_model method from NetworkX allows us to create a graph from a given list of degrees.
In order to create a list of degrees respecting a power law distribution of coefficient $\gamma$, we use the function powerlaw_sequence from NetworkX. However, this function can return zeros, which we don't want because our graph is connected. So we generate each degree one at a time and check that it is not 0.
End of explanation
"""
print_denoised_degree_distribution(power_law, a=100, b=200, d=200)
"""
Explanation: We note right away that the number of edges in this model is much lower than in the collected network (367483 edges).
It seems that the lowest coefficient we can set for the power law is 1.6; all attempts with a smaller coefficient crashed.
We can check that it indeed follows a power law distribution:
End of explanation
"""
linear_regression_coefficient(power_law, limit=79, title='Linear regression of the power-law degree distribution')
"""
Explanation: And we calculate here the coefficient of the power law :
End of explanation
"""
pl_degrees = list(dict(power_law.degree()).values())
f, ax = plt.subplots(figsize=(15, 6))
sns.distplot(degrees_truncated, label='Collected network', ax=ax)
sns.distplot(pl_degrees, label='Power law network', ax=ax)
plt.legend(loc='upper right')
axes = plt.gca()
axes.set_xlim([-100, 1000])
plt.show()
"""
Explanation: It's indeed closer to the original network but there is still a small gap (reminder: the objective is 1.0693). However, as noted earlier, the number of edges of this power-law network is extremely low compared to the original network. It seems that the Barabási-Albert network is a better approximation even if the fit of the distribution is not as good.
The following plot comparing the obtained degree distribution to the original one confirms that the Barabási-Albert network is a better approximation.
End of explanation
"""
giant_g = max(nx.connected_component_subgraphs(g), key=len)
giant_er = max(nx.connected_component_subgraphs(er), key=len)
giant_ba = max(nx.connected_component_subgraphs(ba), key=len)
giant_pl = max(nx.connected_component_subgraphs(power_law), key=len)
print('Size of the giant component / Size of the network ')
print('Collected network : \t {}/{}'.format(len(giant_g.nodes()), len(g.nodes())))
print('Erdős–Rényi model : \t {}/{}'.format(len(giant_er.nodes()), len(er.nodes())))
print('Barabási-Albert model : {}/{}'.format(len(giant_ba.nodes()), len(ba.nodes())))
print('Power law model : \t {}/{}'.format(len(giant_pl.nodes), len(power_law.nodes)))
"""
Explanation: 3.1.4) Comparison of the models <a class="anchor" id="Comparison of the models"></a>
Giant components
In this part we are analyzing the giant components of the original network and of the models.
End of explanation
"""
avg_clustering_g = nx.average_clustering(g)
avg_clustering_er = nx.average_clustering(er)
avg_clustering_ba = nx.average_clustering(ba)
avg_clustering_pl = nx.average_clustering(power_law)
print('Clustering coefficients')
print('Collected network : \t {}'.format(np.round(avg_clustering_g, 3)))
print('Erdős–Rényi model : \t {}'.format(np.round(avg_clustering_er, 3)))
print('Barabási-Albert model : {}'.format(np.round(avg_clustering_ba, 3)))
print('Power law model : \t {}'.format(np.round(avg_clustering_pl, 3)))
"""
Explanation: The original network, the Erdős–Rényi and the Barabási-Albert graphs are fully connected. The model based on the last power-law network also has a very big giant component and is almost fully connected. We can conclude that the connectedness of the network is respected.
Clustering coefficient
The average clustering coefficient measures the overall degree of clustering in the network. Real-world networks tend to have a higher average clustering coefficient because of their tendency to form compact groupings of nodes, so we expect it to be greater than in the models.
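As a reminder, the local clustering coefficient of a node $i$ is the fraction of closed triangles among its neighbours, and the average clustering coefficient is its mean over all nodes:
$$C_i = \frac{2\,T_i}{k_i\,(k_i - 1)}, \qquad C = \frac{1}{n}\sum_{i=1}^{n} C_i$$
where $T_i$ is the number of triangles passing through node $i$ and $k_i$ is its degree.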
End of explanation
"""
print('Average degree : {}'.format(np.round(np.mean(degrees), 1)))
print('{} nodes with degree 5 times bigger.'.format(np.count_nonzero(degrees > 5*np.mean(degrees))))
print('{} nodes with degree 10 times bigger.'.format(np.count_nonzero(degrees > 10*np.mean(degrees))))
"""
Explanation: The last model, created following a power law, has the closest clustering coefficient. However, its far too low number of edges prevents it from being a good model.
3.1.5) Comparison with common networks from the web <a class="anchor" id="Comparison with common networks from the web"></a>
Most scale-free networks follow a distribution of the form $p(k) = C\times k^{-\gamma}$ where usually $2 < \gamma < 3$. In the approximation by a power law distribution we made, we found that $\gamma \simeq 1.0693$, which is not really a common value, as the following table shows (values seen during the lectures).
| Network | Gamma |
|----------------------|-------|
| WWW in | 2.00 |
| WWW out | 2.31 |
| Emails in | 3.43 |
| Emails out | 2.03 |
| Actor | 2.12 |
| Protein interactions | 2.12 |
| Citations in | 3.03 |
| Citations out | 4.00 |
Is a scale-free network such a good model for our collected network? We saw that the fit is not too bad, but there are also more empirical reasons for such a model.
We may wonder why a scale-free network seems to be a good approximation of the collected network. One of the most notable characteristics of a scale-free network is the presence of nodes with a degree much larger than the average, which is the case here:
End of explanation
"""
from utils import get_bag_of_communities
"""
Explanation: It is in fact quite intuitive given that the nodes represent Wikipedia pages, linked whenever one page contains a link to the other. We expect a few large hubs (Wikipedia pages covering an important subject) in such a network, followed by a larger proportion of smaller ones (moderately important subjects), and finally quite a lot of minor ones. The plots above show that the distribution follows that trend fairly well, except that there are fewer minor and very important topics than in a real scale-free network.
This difference is likely to come directly from our sampling method. Indeed, as we start from a central node and stop the collection somewhere, central nodes are important, and at the edge of the network we get what look like minor pages. Those pages could have turned out to be important if we had pushed the collection one hop further from the root node.
The preferential attachment process is another intuitive way to understand why the scraped network looks like a scale-free network. This process is also known as "the rich get richer and the poor get poorer": a quantity (here the links between the nodes) is distributed according to how much the nodes already have. It has been shown that such a process produces scale-free networks, and most algorithms (like the Barabási-Albert one) use this principle to create such networks. Regarding Wikipedia, the more popular a page is and the more important its topic, the more links it will have, and conversely for lesser-known pages. This is exactly a preferential attachment phenomenon.
3.2) Community detection using Spectral clustering <a class="anchor" id="Community detection using Spectral clustering"></a>
We will try to use the collected data to answer our problem which is:
Can we isolate communities of pages just by looking at the hyperlink graph?
This is the famous community detection problem for which a popular method is the Louvain Algorithm.
The measure of performance we will use for the community detection is the modularity. Modularity measures the strength of the division of a network into sub-groups. A network with high modularity has dense intra-connections (within sub-groups) and sparse inter-connections (between different groups).
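Concretely, for an undirected graph with adjacency matrix $A$, $m$ edges and node degrees $k_i$, the modularity of a partition assigning node $i$ to community $c_i$ is
$$Q = \frac{1}{2m}\sum_{i,j}\left[A_{ij} - \frac{k_i k_j}{2m}\right]\delta(c_i, c_j)$$
where $\delta$ is the Kronecker delta; $Q$ compares the observed intra-community edge weight to what a random graph with the same degree sequence would give.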
Louvain was presented in 2008 and, though it has since been improved [1], we will use it as a baseline against which to compare the performance of spectral clustering.
The steps are the following :
* Louvain algorithm as a baseline
* Spectral clustering
* Visualization of the communities
End of explanation
"""
louvain_partition = community.best_partition(g)
louvain_modularity = community.modularity(louvain_partition, g)
louvain_modularity
k_louvain = len(set(louvain_partition.values()))
print('Louvain algorithm found {} communities'.format(k_louvain))
"""
Explanation: 3.2.1) Louvain community detection <a class="anchor" id="Louvain community detection"></a>
We use the Python library community that implements the Louvain algorithm.
This library also allows us to compute the modularity of a given partition of the nodes.
End of explanation
"""
louvain_bag = get_bag_of_communities(network, louvain_partition)
"""
Explanation: We can try to visualize the categories of the nodes in each of these communities. From the scraping, we got for each page a list of categories to which the page belongs. Let's compute, for each community, what we'll call a bag of categories: the set of all categories of the nodes it contains, together with the number of nodes that belong to each category.
The function can be found in utils.py; its implementation is quite straightforward.
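In essence it just accumulates category counts per community, along the lines of the following hypothetical sketch (the real implementation in utils.py may differ in its details):
```python
from collections import defaultdict

def get_bag_of_communities_sketch(network, partition):
    # partition maps page title -> integer community id (0, 1, ..., k-1)
    n_communities = max(partition.values()) + 1
    bags = [defaultdict(int) for _ in range(n_communities)]
    for title, comm in partition.items():
        for cat in network[title]['categories']:
            bags[comm][cat] += 1
    return bags
```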
End of explanation
"""
louvain_counts = [0 for _ in range(k_louvain)]
for i, title in enumerate(louvain_partition.keys()):
louvain_counts[louvain_partition[title]] += 1
"""
Explanation: Let's get the number of pages in each community.
End of explanation
"""
for i in range(k_louvain):
sorted_bag = sorted(louvain_bag[i].items(), key=operator.itemgetter(1), reverse=True)
print(' ')
print('Community {}/{} ({} pages) : '.format(i+1, k_louvain, louvain_counts[i]))
for ind in range(10):
print(sorted_bag[ind])
"""
Explanation: Now we want to visualize the categories of the nodes in each community. We print, for each community, its 10 most represented categories.
End of explanation
"""
laplacian = np.diag(degrees) - adj.todense()
laplacian = sparse.csr_matrix(laplacian)
plt.spy(laplacian.todense())
"""
Explanation: We can see that we get some nice results, because it seems that a general topic can be inferred for each community. The topics are:
| Alphabetical Order |
|---------------------------------|
| Aircrafts |
| American Football |
| Animals / mammals |
| Apple inc. |
| British ships |
| Cars |
| Comics and fictional characters |
| Electronics |
| Car racing |
| Luxury in Britain |
| Mexican soccer |
| Music instruments |
| Rugby |
| Science |
| Social science |
| Songwriters |
| Weapons |
3.2.2) Spectral Clustering <a class="anchor" id="Spectral Clustering"></a>
Now let's try a spectral clustering approach to this community detection problem.
3.2.2.1 Using the natural graph
The first idea is to use the natural graph, that is, each node is a page and there is an edge of weight 1 between two pages if one of the pages links to the other.
We define the graph Laplacian using the formula $L = D - A$ where $D$ is the diagonal matrix containing the degrees and $A$ is the adjacency matrix.
End of explanation
"""
k_spectral = 21
eigenvalues, eigenvectors = sparse.linalg.eigsh(laplacian.asfptype(), k=k_spectral, which='SM')
plt.plot(eigenvalues, '.-', markersize=15)
eigenvalues[:2]
"""
Explanation: In order to do spectral clustering using this Laplacian, we need to compute the first $k$ eigenvalues and the corresponding eigenvectors. We get a matrix $U$ of $\mathbb{R}^{n \times k}$ where $n$ is the number of nodes in the graph. Applying a k-means algorithm to cluster the $n$ vectors of $\mathbb{R}^k$ corresponding to the rows of $U$ gives us a clustering of the $n$ nodes.
Here we need to specify the number of clusters (communities) we want to look for. As a reminder, Louvain returned 17 (sometimes it gives 16) communities (it seems that it gives the maximum modularity but let's recall that Louvain is a heuristic so we are not sure of that).
Later in this notebook (at the end of the development of the model), we run some sort of cross-validation on the parameter k_spectral. For different values, we run the algorithm 5 times and take the mean and standard deviation of the modularity. It seems that 21 gives the best results. Please see below for details on this.
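As a side note, scikit-learn bundles these two steps (spectral embedding followed by k-means) in sklearn.cluster.SpectralClustering; a sketch of how it could be applied here, using the adjacency matrix as a precomputed affinity:
```python
from sklearn.cluster import SpectralClustering

sc = SpectralClustering(n_clusters=21, affinity='precomputed', random_state=0)
# labels_sklearn = sc.fit_predict(np.asarray(adj.todense()))  # not run here, shown for illustration only
```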
End of explanation
"""
centroids, labels = cluster.vq.kmeans2(eigenvectors, k_spectral)
"""
Explanation: We check that the first eigenvalue is 0 but the second is not. The graph is connected.
Now we cluster the resulting vectors in $\mathbb{R}^k$.
End of explanation
"""
cc = [0 for i in range(k_spectral)]
for i in labels:
cc[i] += 1
', '.join([str(i) for i in cc])
"""
Explanation: This warning shows that at least one of the clusters is empty.
In order to get a first idea of how this algorithm did, let's look at the number of nodes in each cluster.
End of explanation
"""
kernel_width = distances.mean()
weights = np.exp(-np.square(distances)/kernel_width**2)
np.fill_diagonal(weights, 0)
"""
Explanation: With almost all the clusters containing fewer than 3 nodes, we can see that this first algorithm did not perform well.
3.2.2.2 Building another graph
As we have seen in class and in one of the homework assignments, in order for spectral clustering to work we need to assign edge weights that are stronger the closer the nodes are.
Let's build another graph with the same vertices but new edges between them.
We have already computed the distances in the graph; let's define edge weights using a kernel (e.g. the Gaussian kernel).
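With the Gaussian (RBF) kernel, two pages at shortest-path distance $d_{ij}$ receive the weight
$$w_{ij} = \exp\!\left(-\frac{d_{ij}^2}{\sigma^2}\right)$$
where the kernel width $\sigma$ is taken to be the mean of all pairwise distances, so that nearby pages get weights close to 1 and distant pages get weights close to 0.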
End of explanation
"""
degrees = np.sum(weights, axis=0)
plt.hist(degrees, bins=50);
laplacian = np.diag(1/np.sqrt(degrees)).dot((np.diag(degrees) - weights).dot(np.diag(1/np.sqrt(degrees))))
"""
Explanation: This creates a complete graph. We could sparsify it for faster computations, but the computation is not too long and experience seems to show that the results are better with the full graph.
End of explanation
"""
tol = 1e-8
np.allclose(laplacian, laplacian.T, atol=tol)
eigenvalues, eigenvectors = linalg.eigh(laplacian, eigvals=(0, k_spectral-1))
plt.plot(eigenvalues, '.-', markersize=15)
centroids, labels = cluster.vq.kmeans2(eigenvectors, k_spectral)
cc = [0 for i in range(k_spectral)]
for i in labels:
cc[i] += 1
', '.join([str(i) for i in cc])
"""
Explanation: We can check that the obtained Laplacian matrix is symmetric.
End of explanation
"""
spectral_partition = {}
for i, title in enumerate(network.keys()):
spectral_partition[title] = labels[i]
spectral_bag = get_bag_of_communities(network, spectral_partition)
spectral_counts = [0 for _ in range(k_spectral)]
for i, title in enumerate(spectral_partition.keys()):
spectral_counts[spectral_partition[title]] += 1
for i in range(k_spectral):
sorted_bag = sorted(spectral_bag[i].items(), key=operator.itemgetter(1), reverse=True)
print(' ')
print('Community {}/{} ({} pages) : '.format(i+1, k_spectral, spectral_counts[i]))
if spectral_counts[i] > 0:
for ind in range(10):
print(sorted_bag[ind])
"""
Explanation: This seems better. We get pages distributed among all the clusters (with some clusters larger than others, of course).
First let's have a look at the categories of each cluster.
End of explanation
"""
spectral_modularity = community.modularity(spectral_partition, g)
spectral_modularity
"""
Explanation: It seems that we get the same results. As we asked for more communities than Louvain, some of them are split but it's either a duplicate or a finer separation.
There are some inconsistencies in the partition we get:
- two communities for Songwriters
- three communities for Ship incidents
- two communities for NFL
- two communities for mammals
but the community electronics is now split into video games and computer hardware.
So we get more communities. Sometimes it's just a duplicate, but sometimes it is a finer separation into two groups.
End of explanation
"""
"""cross_val = {}
for k in tqdm_notebook(range(10, 30)):
tmp = []
for _ in range(5):
eigenvalues, eigenvectors = linalg.eigh(laplacian, eigvals=(0, k-1))
centroids, labels = cluster.vq.kmeans2(eigenvectors, k)
spectral_partition = {}
for i, title in enumerate(network.keys()):
spectral_partition[title] = labels[i]
spectral_modularity = community.modularity(spectral_partition, g)
tmp.append(spectral_modularity)
cross_val[k] = [np.mean(tmp), np.std(tmp)]
save_obj(cross_val, 'cross_val')"""
"""
Explanation: The modularity coefficient is lower.
Testing different values of k_spectral
Here we test different values of k. After some testing, it seems that there is a high variance in the modularity of the partitions returned by the algorithm (for a given k_spectral). In order to find out whether some value is really better than the others, we compute the mean and standard deviation of the modularity for each value by running the algorithm 5 times.
End of explanation
"""
cross_val = load_obj('cross_val')
cross_val
"""
Explanation: As this computation takes approximately one hour to terminate, the results have been stored in a pickle file.
End of explanation
"""
community2color = {
0: sns.xkcd_rgb["peach"],
1: sns.xkcd_rgb["powder blue"],
2: sns.xkcd_rgb["light pink"],
3: sns.xkcd_rgb["chocolate"],
4: sns.xkcd_rgb["orange"],
5: sns.xkcd_rgb["magenta"],
6: sns.xkcd_rgb["purple"],
7: sns.xkcd_rgb["blue"],
8: sns.xkcd_rgb["deep blue"],
9: sns.xkcd_rgb["sky blue"],
10: sns.xkcd_rgb["olive"],
11: sns.xkcd_rgb["seafoam green"],
12: sns.xkcd_rgb["tan"],
13: sns.xkcd_rgb["mauve"],
14: sns.xkcd_rgb["hot pink"],
15: sns.xkcd_rgb["pale green"],
16: sns.xkcd_rgb["indigo"],
17: sns.xkcd_rgb["lavender"],
18: sns.xkcd_rgb["eggplant"],
19: sns.xkcd_rgb["brick"],
20: sns.xkcd_rgb["light blue"],
}
"""
Explanation: We see that the best modularity seems to be achieved with k = 21. However, we note that the standard deviation is quite high in all cases.
3.2.3) Comparison of the two methods <a class="anchor" id="Comparison of the two methods"></a>
It seems that no matter the value of k we choose, we won't be able to reach a higher modularity than the one achieved by the Louvain algorithm. So what could be the advantages of the spectral approach?
* Computational cost:
  - Louvain algorithm: a greedy algorithm that appears to run in $O(n \log n)$ where $n$ is the number of nodes in the network.
  - Spectral clustering: the eigendecomposition of the Laplacian is already in $O(n^3)$, and that is without counting the shortest-paths matrix, whose computation costs $O(|E|+|V|\log |V|)$ per source node with Dijkstra's algorithm. As we have many edges this is really costly.
Spectral clustering is therefore much more costly than the Louvain method, something we noticed in our study.
Better communities ?
We have seen that the communities are not better in the sense of the modularity (measuring the internal cohesion of communities versus the separation of different ones).
Could the partition be better when it comes to the categories of the nodes? In order to measure that, we could apply an NLP pipeline to the categories of the pages to do topic selection. This could give us a more precise idea of the quality of the split when it comes to topics (and not only modularity).
3.2.4) Visualization <a class="anchor" id="3.2.4Visualization"></a>
Plotly visualization
First we want to visualize the graph in the notebook using plotly.
In order to get clean representation of how our nodes build communities, we define a color map for each community that will help us differentiate clusters in our network.
End of explanation
"""
position = nx.spring_layout(g)
for comm in set(louvain_partition.values()):  # avoid shadowing the imported 'community' module
    list_nodes = [node for node in louvain_partition.keys() if louvain_partition[node] == comm]
    nx.draw_networkx_nodes(g, position, list_nodes, node_size=20, node_color=community2color[int(comm)])
nx.draw_networkx_edges(g, position, alpha=0.5)
plt.show()
"""
Explanation: A simple representation of our graph can be obtained using the NetworkX drawing tool. Here we already get an idea of how each category of Wikipedia articles forms a clearly denser group of nodes in our graph.
End of explanation
"""
nx.set_node_attributes(g, spectral_partition, 'spectral')
nx.set_node_attributes(g, louvain_partition, 'louvain')
nx.set_node_attributes(g, position, 'position')
"""
Explanation: Now, in order to put this into a more interactive perspective, we will use plotly scatter plots to help us explore our network. For each node, we set an attribute recording which community it belongs to, based on the Louvain or spectral partition. We also assign a position to each node; this is important in order to find a good representation of the network. The NetworkX and community packages come with built-in functions for positioning networks according to various layout algorithms. After trying out several of them, we chose spring_layout. The result is not perfect but it is easy to use.
End of explanation
"""
from utils import build_communities, set_layout
data = build_communities('louvain','position', G=g, community2color=community2color)
layout = set_layout('Louvain')
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
data = build_communities('spectral', 'position', G=g, community2color=community2color)
layout = set_layout('spectral clustering')
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
"""
Explanation: We implemented two functions in utils that allow us to plot interactive graphs.
End of explanation
"""
for i in louvain_partition.keys():
louvain_partition[i] = str(louvain_partition[i])
for i in spectral_partition.keys():
spectral_partition[i] = str(spectral_partition[i])
"""
Explanation: The layout of the nodes is not ideal but we could not make it as clear as on Gephi (see below).
We can go through the nodes in the graph and see the titles of the pages that belong to each cluster, marked with different colors. As we have already observed, we managed to connect the communities to categories of articles using only the network of hyperlinks.
Gephi visualization
Though the previous visualizations can be handy, they are quite slow and the layout is not really representative of the structure of the graph. This is why we chose to use Gephi as our main visualization tool. Here is the result of some visualizations.
Using Gephi, we were able to get some nice visualizations of our clustering.
End of explanation
"""
nx.set_node_attributes(g, louvain_partition, 'louvain')
nx.set_node_attributes(g, spectral_partition, 'spectral')
for n, d in g.nodes(data=True):
del d['position']
nx.write_graphml(g, 'data/full_graph.graphml')
"""
Explanation: NetworkX requires all the node attributes to be strings to export them.
End of explanation
"""
|
cysuncn/python | study/machinelearning/tensorflow/TensorFlow-Examples-master/notebooks/0_Prerequisite/mnist_dataset_intro.ipynb | gpl-3.0 | # Import MNIST
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Load data
X_train = mnist.train.images
Y_train = mnist.train.labels
X_test = mnist.test.images
Y_test = mnist.test.labels
"""
Explanation: MNIST Dataset Introduction
Most examples use the MNIST dataset of handwritten digits. It has 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image, so each sample is represented as a matrix of size 28x28 with values from 0 to 1.
Overview
Usage
In our examples, we are using TensorFlow input_data.py script to load that dataset.
It is quite useful for managing our data, and handle:
Dataset downloading
Loading the entire dataset into numpy array:
End of explanation
"""
# Get the next 64 images array and labels
batch_X, batch_Y = mnist.train.next_batch(64)
"""
Explanation: A next_batch function can iterate over the whole dataset and return only the desired fraction of the samples (in order to save memory and avoid loading the entire dataset at once).
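A typical training loop then simply pulls a fresh batch at every step, for example (a sketch; the model-specific training operation is omitted):
```python
# Sketch of iterating over the training set in mini-batches
batch_size = 64
num_steps = mnist.train.num_examples // batch_size
for step in range(num_steps):
    batch_X, batch_Y = mnist.train.next_batch(batch_size)
    # feed batch_X, batch_Y into the model's training operation here
```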
End of explanation
"""
|
phoebe-project/phoebe2-docs | development/tutorials/ltte.ipynb | gpl-3.0 | #!pip install -I "phoebe>=2.4,<2.5"
"""
Explanation: Rømer and Light Travel Time Effects (ltte)
Setup
Let's first make sure we have the latest version of PHOEBE 2.4 installed (uncomment this line if running in an online notebook session such as colab).
End of explanation
"""
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger('error')
b = phoebe.default_binary()
"""
Explanation: As always, let's do imports and initialize a logger and a new Bundle.
End of explanation
"""
b.add_dataset('lc', times=phoebe.linspace(-0.05, 0.05, 51), dataset='lc01')
"""
Explanation: Now let's add a light curve dataset to see how ltte affects the timings of eclipses.
End of explanation
"""
print(b['ltte@compute'])
"""
Explanation: Relevant Parameters
The 'ltte' parameter in context='compute' defines whether light travel time effects are taken into account or not.
End of explanation
"""
b['sma@binary'] = 100
b['q'] = 0.1
"""
Explanation: Comparing with and without ltte
In order to have a binary system with any noticeable ltte effects, we'll set a somewhat extreme mass-ratio and semi-major axis.
End of explanation
"""
b.set_value_all('atm', 'blackbody')
b.set_value_all('ld_mode', 'manual')
b.set_value_all('ld_func', 'logarithmic')
b.run_compute(irrad_method='none', ltte=False, model='ltte_off')
b.run_compute(irrad_method='none', ltte=True, model='ltte_on')
afig, mplfig = b.plot(show=True)
"""
Explanation: We'll just ignore the fact that this will be a completely unphysical system since we'll leave the radii and temperatures alone despite somewhat ridiculous masses - but since the masses and radii disagree so much, we'll have to abandon atmospheres and use blackbody.
End of explanation
"""
|
squishbug/DataScienceProgramming | 05-Operating-with-Multiple-Tables/AdvancedTables_orig-Copy1.ipynb | cc0-1.0 | import pandas as pd
import numpy as np
"""
Explanation: Advanced Tables
Why are databases so complex?
Data stored in a database may be split into multiple tables, each containing multiple columns. A column stores a single attribute of the data; a table stores a collection of related attributes.
The database also keeps track of the relationships between different tables.
Databases are designed to minimize redundancy and maintain data integrity, particularly when data is added, changed, or deleted.
Consistency when updating: no duplicate places for information https://en.wikipedia.org/wiki/Database_normalization
Performance https://en.wikipedia.org/wiki/Star_schema
Side note: you may also have to think about isolation level when working with a database where someone may be updating data as you're trying to read it. The isolation level determines the database read behavior in this situation. See https://en.wikipedia.org/wiki/Isolation_(database_systems).
Working with multiple tables
Two tables can be joined at a time. 'Join' is a binary operator. See https://en.wikipedia.org/wiki/Join_(SQL).
Tables must have key values that can be matched. Usually one table has a primary key and the other table has a foreign key.
Pandas
Pandas allows "merge", "join", and "concatenate" operations. See http://pandas.pydata.org/pandas-docs/version/0.18.1/merging.html#merge-join-and-concatenate for additional reading.
Pandas also allows reshaping and pivoting data tables, see http://pandas.pydata.org/pandas-docs/version/0.18.1/reshaping.html.
In this class, we will cover table joining, merging and concatenation. We will also go over using some of the time-series handling capabilities in Pandas.
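As a quick preview of the difference between these operations, here is a toy example (made-up data, not from AdventureWorks):
```python
import pandas as pd

left = pd.DataFrame({'key': ['a', 'b'], 'x': [1, 2]})
right = pd.DataFrame({'key': ['a', 'c'], 'y': [3, 4]})

pd.concat([left, right])                       # stacks rows; columns are aligned by name
pd.merge(left, right, on='key', how='inner')   # matches rows on 'key' -> only key 'a' survives
left.set_index('key').join(right.set_index('key'))  # join: a merge on the index (left join by default)
```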
End of explanation
"""
Employees = pd.read_excel('/home/data/AdventureWorks/Employees.xls')
Territory = pd.read_excel('/home/data/AdventureWorks/SalesTerritory.xls')
Customers = pd.read_excel('/home/data/AdventureWorks/Customers.xls')
Orders = pd.read_excel('/home/data/AdventureWorks/ItemsOrdered.xls')
"""
Explanation: Concatenating tables in Pandas
To introduce join operations, we will be working with the AdventureWorks dataset, a standard dataset from Microsoft SQL Server for learning to work with databases. It contains data for the fictitious bicycle manufacturer Adventure Works Cycles.
Let's start by importing some tables from AdventureWorks in /home/data/AdventureWorks. These tables contain data on AdventureWorks employees, sales territories, customers, and orders placed by the customers.
End of explanation
"""
Employees.head()
Territory.head()
Customers.head()
Orders.head()
"""
Explanation: Let's take a look at the data we'll be working with:
End of explanation
"""
help(pd.concat)
# constructing the territory tables... as noted, this is an artificial example
TerritoryUSA = Territory[Territory.CountryCode=='US']; TerritoryUSA['RepID'] = np.random.randint(1,1000,5)
TerritoryWorld = Territory[Territory.CountryCode!='US']
TerritoryUSA
TerritoryWorld
# we'll concatenate the databases, but keep separate keys so that we can keep track of which entries came from AdventuresUSA and
# which from AdventuresWorld.
# We'll use "join='inner'" to only keep colunms that are common to both tables;
# that is, we will drop the no-longer needed RepID in AdventuresUSA.
Territory2 = pd.concat([TerritoryUSA, TerritoryWorld], keys=['usa', 'world'], join='inner')
Territory2
"""
Explanation: Let's construct a slightly artificial example. Suppose that AdventureWorks was formed by merging two companies, AdventuresUSA which operated in the US and AdventuresWorld, which operated in other countries. Now we want information on their combined sales territories.
The Pandas "concat" function is good for stacking tables on top of each other. We will use it to combine the AdventuresUSA and AdventuresWorld territories data tables.
End of explanation
"""
help(pd.DataFrame.append)
Territory3 = TerritoryUSA.append(TerritoryWorld)
Territory3
"""
Explanation: Pandas "append" behaves just like "concat" with axis=0 and join='outer' (i.e., keep all column names). Missing values are set to NaN.
End of explanation
"""
help(pd.merge)
Ans = pd.merge(Employees.loc[:,["EmployeeID","FirstName","MiddleName","LastName","JobTitle","TerritoryID"]],
Territory,
how='left', on='TerritoryID')
Ans.head()
# Overachiever answer:
Ans['EmployeeName'] = Ans[["FirstName","MiddleName","LastName"]].apply(lambda x: x.LastName+", "+x.FirstName+" "+str(x.MiddleName), axis=1)
Ans = Ans[['EmployeeName', 'EmployeeID', 'JobTitle', 'TerritoryID', 'Name', 'CountryCode', 'Region', 'SalesYTD', 'SalesLastYear']]
Ans
"""
Explanation: Joining and merging tables in Pandas
Join and merge are powerful tools for working with multiple tables. We will use them to answer some questions about the
AdventureWorks dataset that you might encounter in real-life situations.
Join does fast table joining on a shared index.
Merge does the same thing, but gives you the option to specify columns to join on.
The idea of joining on a column will become clearer with some examples.
Example 1. "I want a list of all employees, and if any are salespeople then show me the details about their sales territory"
From AdventureWorks, we have a table "Employees" that gives a lot of information about AdventureWorks employees, like 'EmployeeID', 'ManagerID', 'TerritoryID', 'Title', 'FirstName','MiddleName', 'LastName', 'Suffix', 'JobTitle', 'NationalIDNumber', 'BirthDate', 'MaritalStatus', 'Gender', 'HireDate', 'SalariedFlag', 'VacationHours', 'SickLeaveHours', 'PhoneNumber', 'PhoneNumberType', 'EmailAddress', 'AddressLine1', 'AddressLine2', 'City', 'StateProvinceName', 'PostalCode', 'CountryName'. \
Since we're just being asked for a list of employees, we'll give the EmployeeID and their first, middle, and last names, and their role in the company (since additional information is requested for salespeople only). Then, for the salespeople, we must attach information about their sales territories, which is contained in the Territories table.
Notice that the Employees table has a column 'TerritoryID', which corresponds to the primary key in the 'Territory' table (in 'Territory', each territory has a unique 'TerritoryID'). We'll do a join on TerritoryID.
End of explanation
"""
Ans2 = Ans[Ans.JobTitle=='Sales Representative']
Ans2
# Overachiever: What about *all* employees associated with sales?
Ans2 = Ans[Ans["JobTitle"].apply(lambda x: 'Sales' in x)]
Ans2
"""
Explanation: "For the list above, limit the results to just salespeople"
End of explanation
"""
Ans3 = pd.merge(Customers[["CustomerID","FirstName","LastName","SalesTerritoryID"]],
Territory[["TerritoryID","Name"]],
how='left',
left_on='SalesTerritoryID', right_on='TerritoryID', )
Ans3
"""
Explanation: "Give me a list of our customers, and also tell me which sales territory they fall in."
This looks like another question for "merge"! We have a list of customers with their addresses, and we have a list of territories, but they are in separate tables.
Let's recover a list of customer names and IDs, together with corresponding sales territory names.
This time, we have to be careful, because "TerritoryID" in the Territory table matches "SalesTerritoryID" in the Customers table. So, we'll have to specify different column names to merge on for the two tables.
End of explanation
"""
Ans = pd.merge(Territory, Customers, how="inner", left_on="TerritoryID", right_on="SalesTerritoryID")
Ans
"""
Explanation: "Give me a list of all sales territories, also show what customers fall under them"
End of explanation
"""
# In-class exercise! :)
Customers[Customers.StateName=="North Carolina"].CustomerID.count()
"""
Explanation: "Give me a list of the customers we have in North Carolina, and tell me how many there are."
End of explanation
"""
# We'll use the Orders table for this! In-class exercise :)
Orders['TotalItemPrice'] = Orders.Quantity * Orders.Price
"""
Explanation: "For each of the items ordered, show the total price (sometimes they ordered more than 1 item)"
End of explanation
"""
# In-class exercise! :)
(pd.merge(Customers[["FirstName", "LastName", "CustomerID"]], Orders[["CustomerID", "TotalItemPrice"]], how="inner", on="CustomerID")
   .groupby(["CustomerID", "FirstName", "LastName"]).sum()
   .sort_values("TotalItemPrice", ascending=False))  # highest spenders first
"""
Explanation: "Show a list of customers, and the total amount of money they have spent with AdventureWorks. I want the highest spenders to appear first!"
End of explanation
"""
help(pd.DataFrame.combine_first)
help(pd.DataFrame.update)
Customers
"""
Explanation: Another side note:
End of explanation
"""
|
jasdumas/jasdumas.github.io | post_data/final_project_jasmine_dumas.ipynb | mit | ## load libraries
import sys
from numpy import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import operator
%matplotlib inline
from sklearn.feature_extraction import DictVectorizer
from sklearn import preprocessing
from sklearn import neighbors, tree, naive_bayes
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.cross_validation import train_test_split
from sklearn.neighbors import KNeighborsClassifier
"""
Explanation: Final Project
Jasmine Dumas (1523905)
CSC 478: Programming Machine Learning Applications - Autumn 2016
Due: Tuesday, November 22, 2016
Final Project Objective:
Analyze Lending Club's issued loans: https://www.kaggle.com/wendykan/lending-club-loan-data
Data Analysis Tasks:
Supervised Learning: Classifier using k Nearest Neighbor of payment status (Current, Late, Fully Paid, etc.)
Exploratory Data Analysis
Pre-processing & Data Cleaning
Building the Classifier
Evaluating the model
1. Load Libraries
End of explanation
"""
data = pd.read_csv("loan.csv", low_memory=False)
"""
Explanation: 2. Load the data
End of explanation
"""
# 5% of the data without replacement
data = data.sample(frac=0.05, replace=False, random_state=123)
"""
Explanation: a. Data reduction for computation
Previous attempts to create the model matrix below crashed the kernel, so I'm going to reduce the dataset size by selecting a random sample of 5% of the original dataset (without replacement).
End of explanation
"""
data.shape
data.head(n=5)
data.columns
"""
Explanation: 3. Explore the data
visaully and descriptive methods
End of explanation
"""
pd.unique(data['loan_status'].values.ravel())
print("Amount of Classes: ", len(pd.unique(data['loan_status'].values.ravel())))
len(pd.unique(data['zip_code'].values.ravel())) # want to make sure this was not too unique
len(pd.unique(data['url'].values.ravel())) # drop url
len(pd.unique(data['last_pymnt_d'].values.ravel()))
len(pd.unique(data['next_pymnt_d'].values.ravel()))
for col in data.select_dtypes(include=['object']).columns:
print ("Column {} has {} unique instances".format( col, len(data[col].unique())) )
"""
Explanation: The loan_status column is the target!
a. How many classes are there?
End of explanation
"""
len(pd.unique(data['member_id'].values.ravel())) == data.shape[0]
"""
Explanation: b. Are there unique customers in the data or repeats?
End of explanation
"""
data = data.drop('id', 1) #
data = data.drop('member_id', 1)#
data = data.drop('url', 1)#
data = data.drop('purpose', 1)
data = data.drop('title', 1)#
data = data.drop('zip_code', 1)#
data = data.drop('emp_title', 1)#
data = data.drop('earliest_cr_line', 1)#
data = data.drop('term', 1)
data = data.drop('sub_grade', 1) #
data = data.drop('last_pymnt_d', 1)#
data = data.drop('next_pymnt_d', 1)#
data = data.drop('last_credit_pull_d', 1)
data = data.drop('issue_d', 1) ##
data = data.drop('desc', 1)##
data = data.drop('addr_state', 1)##
data.shape
# yay this is better
for col in data.select_dtypes(include=['object']).columns:
print ("Column {} has {} unique instances".format( col, len(data[col].unique())) )
"""
Explanation: c. Drop some of the junk variables (id, member_id, ...)
Reasons: High Cardinality
pre-pre-processing 😃
End of explanation
"""
data['loan_amnt'].plot(kind="hist", bins=10)
data['grade'].value_counts().plot(kind='bar')
data['emp_length'].value_counts().plot(kind='bar')
"""
Explanation: d. Exploratory Data Analysis: What is the distribution of the loan amount?
In general the loan amount was usually under $15,000
End of explanation
"""
data['loan_status'].value_counts().plot(kind='bar')
"""
Explanation: e. What is the distribution of the target class?
For most of this dataset, the loans are in a Current state (in payment) or Fully Paid off
Looks like a Poisson Distribution?!
End of explanation
"""
data._get_numeric_data().columns
"There are {} numeric columns in the data set".format(len(data._get_numeric_data().columns) )
"""
Explanation: f. What are the numeric columns?
For pre-processing and scaling
End of explanation
"""
data.select_dtypes(include=['object']).columns
"There are {} Character columns in the data set (minus the target)".format(len(data.select_dtypes(include=['object']).columns) -1)
"""
Explanation: g. What are the character columns?
For one-hot encoding into a model matrix
End of explanation
"""
X = data.drop("loan_status", axis=1, inplace = False)
y = data.loan_status
y.head()
"""
Explanation: 4. Pre-processing the data
a. Remove the target from the entire dataset
End of explanation
"""
def model_matrix(df , columns):
dummified_cols = pd.get_dummies(df[columns])
df = df.drop(columns, axis = 1, inplace=False)
df_new = df.join(dummified_cols)
return df_new
X = model_matrix(X, ['grade', 'emp_length', 'home_ownership', 'verification_status',
'pymnt_plan', 'initial_list_status', 'application_type', 'verification_status_joint'])
# 'issue_d' 'desc' 'addr_state'
X.head()
X.shape
"""
Explanation: b. Transform the data into a model matrix with one-hot encoding
isolate the variables of char class
End of explanation
"""
# impute rows with NaN with a 0 for now
X2 = X.fillna(value = 0)
X2.head()
from sklearn.preprocessing import MinMaxScaler
Scaler = MinMaxScaler()
X2[['loan_amnt', 'funded_amnt', 'funded_amnt_inv', 'int_rate',
'installment', 'annual_inc', 'dti', 'delinq_2yrs', 'inq_last_6mths',
'mths_since_last_delinq', 'mths_since_last_record', 'open_acc',
'pub_rec', 'revol_bal', 'revol_util', 'total_acc', 'out_prncp',
'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp',
'total_rec_int', 'total_rec_late_fee', 'recoveries',
'collection_recovery_fee', 'last_pymnt_amnt',
'collections_12_mths_ex_med', 'mths_since_last_major_derog',
'policy_code', 'annual_inc_joint', 'dti_joint', 'acc_now_delinq',
'tot_coll_amt', 'tot_cur_bal', 'open_acc_6m', 'open_il_6m',
'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il',
'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util',
'total_rev_hi_lim', 'inq_fi', 'total_cu_tl', 'inq_last_12m']] = Scaler.fit_transform(X2[['loan_amnt', 'funded_amnt', 'funded_amnt_inv', 'int_rate',
'installment', 'annual_inc', 'dti', 'delinq_2yrs', 'inq_last_6mths',
'mths_since_last_delinq', 'mths_since_last_record', 'open_acc',
'pub_rec', 'revol_bal', 'revol_util', 'total_acc', 'out_prncp',
'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp',
'total_rec_int', 'total_rec_late_fee', 'recoveries',
'collection_recovery_fee', 'last_pymnt_amnt',
'collections_12_mths_ex_med', 'mths_since_last_major_derog',
'policy_code', 'annual_inc_joint', 'dti_joint', 'acc_now_delinq',
'tot_coll_amt', 'tot_cur_bal', 'open_acc_6m', 'open_il_6m',
'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il',
'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util',
'total_rev_hi_lim', 'inq_fi', 'total_cu_tl', 'inq_last_12m']])
X2.head()
"""
Explanation: c. Scale the continuous variables using min-max scaling
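Min-max scaling maps each numeric column onto the $[0, 1]$ range:
$$x' = \frac{x - \min(x)}{\max(x) - \min(x)}$$
so that features measured on very different scales (loan amounts vs. ratios) contribute comparably to the Euclidean distances used by kNN.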
End of explanation
"""
x_train, x_test, y_train, y_test = train_test_split(X2, y, test_size=.3, random_state=123)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
"""
Explanation: d. Partition the data into train and testing
End of explanation
"""
# start out with the number of classes for neighbors
data_knn = KNeighborsClassifier(n_neighbors = 10, metric='euclidean')
data_knn
data_knn.fit(x_train, y_train)
"""
Explanation: 5. Building the k Nearest Neighbor Classifier
experiment with different values for neighbors
End of explanation
"""
data_knn.predict(x_test)
"""
Explanation: a. predict on the test data using the knn model created above
End of explanation
"""
# Mean accuracy (the score method of a classifier) on training and test data
accuracy_train = data_knn.score(x_train, y_train)
accuracy_test = data_knn.score(x_test, y_test)
print('Training data accuracy:')
print(accuracy_train)
print('Test data accuracy:')
print(accuracy_test)
"""
Explanation: b. Evaluating the classifier model using accuracy. Note that for a classifier, the score method returns the mean accuracy on the given data, not an R-squared value (which applies to regressors).
End of explanation
"""
# confusion matrix
from sklearn.metrics import confusion_matrix
knn_confusion_matrix = confusion_matrix(y_true = y_test, y_pred = data_knn.predict(x_test))
print("The Confusion matrix:\n", knn_confusion_matrix)
# visualize the confusion matrix
# http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
plt.matshow(knn_confusion_matrix, cmap = plt.cm.Blues)
plt.title("KNN Confusion Matrix\n")
#plt.xticks([0,1], ['No', 'Yes'])
#plt.yticks([0,1], ['No', 'Yes'])
plt.ylabel('True label')
plt.xlabel('Predicted label')
for y in range(knn_confusion_matrix.shape[0]):
for x in range(knn_confusion_matrix.shape[1]):
plt.text(x, y, '{}'.format(knn_confusion_matrix[y, x]),
horizontalalignment = 'center',
verticalalignment = 'center',)
plt.show()
"""
Explanation: c. Confusion Matrix
End of explanation
"""
#Generate the classification report
from sklearn.metrics import classification_report
knn_classify_report = classification_report(y_true = y_test,
y_pred = data_knn.predict(x_test))
print(knn_classify_report)
"""
Explanation: d. Classification Report
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/inm/cmip6/models/sandbox-2/atmos.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inm', 'sandbox-2', 'atmos')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmos
MIP Era: CMIP6
Institute: INM
Source ID: SANDBOX-2
Topic: Atmos
Sub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos.
Properties: 156 (127 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:05
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Overview
2. Key Properties --> Resolution
3. Key Properties --> Timestepping
4. Key Properties --> Orography
5. Grid --> Discretisation
6. Grid --> Discretisation --> Horizontal
7. Grid --> Discretisation --> Vertical
8. Dynamical Core
9. Dynamical Core --> Top Boundary
10. Dynamical Core --> Lateral Boundary
11. Dynamical Core --> Diffusion Horizontal
12. Dynamical Core --> Advection Tracers
13. Dynamical Core --> Advection Momentum
14. Radiation
15. Radiation --> Shortwave Radiation
16. Radiation --> Shortwave GHG
17. Radiation --> Shortwave Cloud Ice
18. Radiation --> Shortwave Cloud Liquid
19. Radiation --> Shortwave Cloud Inhomogeneity
20. Radiation --> Shortwave Aerosols
21. Radiation --> Shortwave Gases
22. Radiation --> Longwave Radiation
23. Radiation --> Longwave GHG
24. Radiation --> Longwave Cloud Ice
25. Radiation --> Longwave Cloud Liquid
26. Radiation --> Longwave Cloud Inhomogeneity
27. Radiation --> Longwave Aerosols
28. Radiation --> Longwave Gases
29. Turbulence Convection
30. Turbulence Convection --> Boundary Layer Turbulence
31. Turbulence Convection --> Deep Convection
32. Turbulence Convection --> Shallow Convection
33. Microphysics Precipitation
34. Microphysics Precipitation --> Large Scale Precipitation
35. Microphysics Precipitation --> Large Scale Cloud Microphysics
36. Cloud Scheme
37. Cloud Scheme --> Optical Cloud Properties
38. Cloud Scheme --> Sub Grid Scale Water Distribution
39. Cloud Scheme --> Sub Grid Scale Ice Distribution
40. Observation Simulation
41. Observation Simulation --> Isscp Attributes
42. Observation Simulation --> Cosp Attributes
43. Observation Simulation --> Radar Inputs
44. Observation Simulation --> Lidar Inputs
45. Gravity Waves
46. Gravity Waves --> Orographic Gravity Waves
47. Gravity Waves --> Non Orographic Gravity Waves
48. Solar
49. Solar --> Solar Pathways
50. Solar --> Solar Constant
51. Solar --> Orbital Parameters
52. Solar --> Insolation Ozone
53. Volcanos
54. Volcanos --> Volcanoes Treatment
1. Key Properties --> Overview
Top level key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of atmospheric model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Resolution
Characteristics of the model resolution
2.1. Horizontal Resolution Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, e.g. 1 deg (Equator) - 0.5 deg
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.4. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on the computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 2.5. High Top
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping
Characteristics of the atmosphere model time stepping
3.1. Timestep Dynamics
Is Required: TRUE Type: STRING Cardinality: 1.1
Timestep for the dynamics, e.g. 30 min.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep Shortwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the shortwave radiative transfer, e.g. 1.5 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestep Longwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the longwave radiative transfer, e.g. 3 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Orography
Characteristics of the model orography
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the orography.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
"""
Explanation: 4.2. Changes
Is Required: TRUE Type: ENUM Cardinality: 1.N
If the orography type is modified describe the time adaptation changes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid --> Discretisation
Atmosphere grid discretisation
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of grid discretisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Discretisation --> Horizontal
Atmosphere discretisation in the horizontal
6.1. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
"""
Explanation: 6.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.3. Scheme Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. Horizontal Pole
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal discretisation pole singularity treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.5. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Discretisation --> Vertical
Atmosphere discretisation in the vertical
7.1. Coordinate Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type of vertical coordinate system
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Dynamical Core
Characteristics of the dynamical core
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere dynamical core
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the dynamical core of the model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Timestepping Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Timestepping framework type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of the model prognostic variables
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Dynamical Core --> Top Boundary
Type of boundary layer at the top of the model
9.1. Top Boundary Condition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Top boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Top Heat
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary heat treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Top Wind
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary wind treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Dynamical Core --> Lateral Boundary
Type of lateral boundary condition (if the model is a regional model)
10.1. Condition
Is Required: FALSE Type: ENUM Cardinality: 0.1
Type of lateral boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Dynamical Core --> Diffusion Horizontal
Horizontal diffusion scheme
11.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Horizontal diffusion scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal diffusion scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Dynamical Core --> Advection Tracers
Tracer advection scheme
12.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Tracer advection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.3. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.4. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracer advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamical Core --> Advection Momentum
Momentum advection scheme
13.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Momentum advection schemes name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Scheme Staggering Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiation
Characteristics of the atmosphere radiation process
14.1. Aerosols
Is Required: TRUE Type: ENUM Cardinality: 1.N
Aerosols whose radiative effect is taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Radiation --> Shortwave Radiation
Properties of the shortwave radiation scheme
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of shortwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shortwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shortwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Shortwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiation --> Shortwave GHG
Representation of greenhouse gases in the shortwave radiation scheme
16.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiation --> Shortwave Cloud Ice
Shortwave radiative properties of ice crystals in clouds
17.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiation --> Shortwave Cloud Liquid
Shortwave radiative properties of liquid droplets in clouds
18.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiation --> Shortwave Cloud Inhomogeneity
Cloud inhomogeneity in the shortwave radiation scheme
19.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiation --> Shortwave Aerosols
Shortwave radiative properties of aerosols
20.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiation --> Shortwave Gases
Shortwave radiative properties of gases
21.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Radiation --> Longwave Radiation
Properties of the longwave radiation scheme
22.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of longwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the longwave radiation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Longwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Longwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 22.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Longwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiation --> Longwave GHG
Representation of greenhouse gases in the longwave radiation scheme
23.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiation --> Longwave Cloud Ice
Longwave radiative properties of ice crystals in clouds
24.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiation --> Longwave Cloud Liquid
Longwave radiative properties of liquid droplets in clouds
25.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiation --> Longwave Cloud Inhomogeneity
Cloud inhomogeneity in the longwave radiation scheme
26.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiation --> Longwave Aerosols
Longwave radiative properties of aerosols
27.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiation --> Longwave Gases
Longwave radiative properties of gases
28.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Turbulence Convection
Atmosphere Convective Turbulence and Clouds
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere convection and turbulence
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Turbulence Convection --> Boundary Layer Turbulence
Properties of the boundary layer turbulence scheme
30.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Boundary layer turbulence scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Boundary layer turbulence scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Closure Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Boundary layer turbulence scheme closure order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Counter Gradient
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Uses boundary layer turbulence scheme counter gradient
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31. Turbulence Convection --> Deep Convection
Properties of the deep convection scheme
31.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Deep convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of deep convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeors and water vapor from updrafts
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Turbulence Convection --> Shallow Convection
Properties of the shallow convection scheme
32.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Shallow convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
shallow convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
"""
Explanation: 32.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
shallow convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33. Microphysics Precipitation
Large Scale Cloud Microphysics and Precipitation
33.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of large scale cloud microphysics and precipitation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Microphysics Precipitation --> Large Scale Precipitation
Properties of the large scale precipitation scheme
34.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the large scale precipitation parameterisation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34.2. Hydrometeors
Is Required: TRUE Type: ENUM Cardinality: 1.N
Precipitating hydrometeors taken into account in the large scale precipitation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Properties of the large scale cloud microphysics scheme
35.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the microphysics parameterisation scheme used for large scale clouds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Large scale cloud microphysics processes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Cloud Scheme
Characteristics of the cloud scheme
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the atmosphere cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
"""
Explanation: 36.3. Atmos Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Atmosphere components that are linked to the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.4. Uses Separate Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Different cloud schemes for the different types of clouds (convective, stratiform and boundary layer)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.6. Prognostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a prognostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.7. Diagnostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a diagnostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.8. Prognostic Variables
Is Required: FALSE Type: ENUM Cardinality: 0.N
List the prognostic variables used by the cloud scheme, if applicable.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 37. Cloud Scheme --> Optical Cloud Properties
Optical cloud properties
37.1. Cloud Overlap Method
Is Required: FALSE Type: ENUM Cardinality: 0.1
Method for taking into account overlapping of cloud layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Cloud Inhomogeneity
Is Required: FALSE Type: STRING Cardinality: 0.1
Method for taking into account cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Sub-grid scale water distribution
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale water distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 38.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale water distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 38.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale water distribution function type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 38.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale water distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Sub-grid scale ice distribution
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale ice distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 39.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale ice distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 39.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale ice distribution function type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 39.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale ice distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40. Observation Simulation
Characteristics of observation simulation
40.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of observation simulator characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Observation Simulation --> Isscp Attributes
ISSCP Characteristics
41.1. Top Height Estimation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator ISSCP top height estimation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. Top Height Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator ISSCP top height direction
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 42. Observation Simulation --> Cosp Attributes
CFMIP Observational Simulator Package attributes
42.1. Run Configuration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator COSP run configuration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.2. Number Of Grid Points
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of grid points
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.3. Number Of Sub Columns
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of sub-columns used to simulate sub-grid variability
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.4. Number Of Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of levels
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 43. Observation Simulation --> Radar Inputs
Characteristics of the cloud radar simulator
43.1. Frequency
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Cloud simulator radar frequency (Hz)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 43.2. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator radar type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.3. Gas Absorption
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses gas absorption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.4. Effective Radius
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses effective radius
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44. Observation Simulation --> Lidar Inputs
Characteristics of the cloud lidar simulator
44.1. Ice Types
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator lidar ice type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44.2. Overlap
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator lidar overlap
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 45. Gravity Waves
Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.
45.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of gravity wave parameterisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.2. Sponge Layer
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sponge layer in the upper levels in order to avoid gravity wave reflection at the top.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.3. Background
Is Required: TRUE Type: ENUM Cardinality: 1.1
Background wave distribution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.4. Subgrid Scale Orography
Is Required: TRUE Type: ENUM Cardinality: 1.N
Subgrid scale orography effects taken into account.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 46. Gravity Waves --> Orographic Gravity Waves
Gravity waves generated due to the presence of orography
46.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 47. Gravity Waves --> Non Orographic Gravity Waves
Gravity waves generated by non-orographic processes.
47.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the non-orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
"""
Explanation: 47.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 48. Solar
Top of atmosphere solar insolation characteristics
48.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of solar insolation of the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 49. Solar --> Solar Pathways
Pathways for solar forcing of the atmosphere
49.1. Pathways
Is Required: TRUE Type: ENUM Cardinality: 1.N
Pathways for the solar forcing of the atmosphere model domain
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 50. Solar --> Solar Constant
Solar constant and top of atmosphere insolation characteristics
50.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the solar constant.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 50.2. Fixed Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If the solar constant is fixed, enter the value of the solar constant (W m-2).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 50.3. Transient Characteristics
Is Required: TRUE Type: STRING Cardinality: 1.1
Solar constant transient characteristics (W m-2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 51. Solar --> Orbital Parameters
Orbital parameters and top of atmosphere insolation characteristics
51.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 51.2. Fixed Reference Date
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Reference date for fixed orbital parameters (yyyy)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 51.3. Transient Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Description of transient orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 51.4. Computation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used for computing orbital parameters.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 52. Solar --> Insolation Ozone
Impact of solar insolation on stratospheric ozone
52.1. Solar Ozone Impact
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does top of atmosphere insolation impact on stratospheric ozone?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 53. Volcanos
Characteristics of the implementation of volcanoes
53.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the implementation of volcanic effects in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 54. Volcanos --> Volcanoes Treatment
Treatment of volcanoes in the atmosphere
54.1. Volcanoes Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How volcanic effects are modeled in the atmosphere.
End of explanation
"""
|
adolfoguimaraes/machinelearning | Introduction/Tutorial01_HelloWorld.ipynb | mit | # Let's convert the textual information into numbers: (0) Irregular, (1) Smooth.
# The labels will also be converted into numbers: (0) Apple and (1) Orange
features = [[140, 1], [130, 1], [150, 0], [170, 0]]
labels = [0, 0, 1, 1]
"""
Explanation: Tutorial 01 - Hello World in Machine Learning
To begin our study of machine learning, we will start with a simple learning example. The goal here is to understand what Machine Learning is and how we can use it. Details of the methods applied will not be presented here; they will be explained throughout the course.
The course material is based on Udacity's Intro to Machine Learning course and also on the content of a few books:
[1]: Inteligência Artificial. Uma Abordagem de Aprendizado de Máquina (FACELI et al., 2011)
[2]: Machine Learning: An Algorithmic Perspective, Second Edition (MARSLAND et al., 2014)
[3]: Redes Neurais Artificiais Para Engenharia e Ciências Aplicadas. Fundamentos Teóricos e Aspectos Práticos (da SILVA I., 2016)
[4]: An Introduction to Statistical Learning with Applications in R (JAMES, G. et al., 2015)
In terms of programming language, we will use Python together with the ScikitLearn and Tensorflow libraries. Auxiliary libraries such as Pandas, NumPy, Scipy and MatPlotLib, among others, will also be needed.
The material for this first class is based on two videos:
Hello World - Machine Learning Recipes #1 (by Josh Gordon - Google)
Visualizing a Decision Tree - Machine Learning Recipes #2 (by Josh Gordon - Google)
Let's Get Started :)
The first step is to understand what Machine Learning is. A definition given in [2] is the following:
Machine Learning, then, is about making computers modify or adapt their actions (whether these actions are making predictions, or controlling a robot) so that these actions get more accurate, where accuracy is measured by how well the chosen actions reflect the correct ones.
We can view machine learning as a field of Artificial Intelligence that aims to give computers the ability to modify and adapt their actions according to the problem and, along the way, improve their performance.
This area underpins systems we use every day, such as:
Automatic translation systems
Movie recommendation systems
Personal assistants such as Siri
And many other applications that will be detailed throughout the course.
All of these systems are possible thanks to extensive work on a family of algorithms that make up machine learning. There are several ways to classify this set of algorithms. A simple one is to divide them into 4 groups. Quoting [2], we have:
Supervised Learning: A training set of examples with the correct responses (targets) is provided and, based on this training set, the algorithm generalises to respond correctly to all possible inputs. This is also called learning from exemplars.
Unsupervised Learning: Correct responses are not provided, but instead the algorithm tries to identify similarities between the inputs so that inputs that have something in common are categorised together. The statistical approach to unsupervised learning is known as density estimation.
Reinforcement Learning: This is somewhere between supervised and unsupervised learning. The algorithm gets told when the answer is wrong, but does not get told how to correct it. It has to explore and try out different possibilities until it works out how to get the answer right. Reinforcement learning is sometimes called learning with a critic because of this monitor that scores the answer, but does not suggest improvements.
Evolutionary Learning: Biological evolution can be seen as a learning process: biological organisms adapt to improve their survival rates and chance of having offspring in their environment. We'll look at how we can model this in a computer, using an idea of fitness, which corresponds to a score for how good the current solution is.
In this course we will explore some of the main algorithms from each of these groups.
Hello World
To get started, let's build some intuition for the process these algorithms carry out, using a simple classification task. Classification is one of the supervised learning techniques: given a dataset, you must assign each instance of the set to a class. This will be the topic of the next tutorial, where it is covered in more detail.
To keep things simple, imagine the following task: I want to build a program that classifies oranges and apples. To understand the problem, watch: https://www.youtube.com/watch?v=cKxRvEZd3Mw
It is easy to see that we cannot simply hard-code every variation of the characteristics of apples and oranges. However, we can learn patterns that characterise an apple and an orange. When a new fruit is given to the program, the presence or absence of these patterns lets it classify the fruit as an apple, an orange, or some other fruit.
We will work with an example database containing measured characteristics of oranges and apples. For simplicity, we will use two characteristics: weight and texture. In machine learning, the characteristics that make up our dataset are called features.
Weight | Texture | Class (label)
------------ | ------------- | -------------
150g | Irregular | Orange
170g | Irregular | Orange
140g | Smooth | Apple
130g | Smooth | Apple
Each row of our database is called an instance (example). Each example is classified according to a label or class. In this case, we will work with two classes, which are the two types of fruit.
The whole table is our training data. Think of this data as what our program will use to learn. Generally speaking, and very much simplified, the more data we have, the better our program will learn.
Let's simulate this problem in code.
End of explanation
"""
from sklearn import tree
clf = tree.DecisionTreeClassifier()
"""
Explanation: Let's now create a model based on this dataset. We will use the decision tree algorithm to do this.
End of explanation
"""
clf = clf.fit(features, labels)
"""
Explanation: clf is the decision-tree-based classifier. We need to train it with the training dataset.
End of explanation
"""
# Weight 160 and Irregular texture. Note that this kind of fruit is not present in the database.
print(clf.predict([[160, 0]]))
"""
Explanation: Note that the classifier receives the features and the labels as parameters. This is a supervised classifier, so it needs to know the "answer key" for the instances being passed in.
Once the model has been built, we can use it to classify an unknown instance.
End of explanation
"""
from sklearn.datasets import load_iris
dataset_iris = load_iris()
"""
Explanation: It classified this fruit as an Orange.
HelloWorld++
Let's extend this HelloWorld a bit further. Of course, the previous example was only meant to convey the idea of how such a system works. However, our program is not learning much, since the number of examples given to it is very small. Let's work with a slightly larger example.
For this example we will use the Iris Dataset, a classic dataset in machine learning. Its purpose is mostly didactic, and the task is to classify 3 species of a type of flower (Iris). The classification is made from 4 characteristics of the plant: sepal length, sepal width, petal length and petal width.
<img src="http://5047-presscdn.pagely.netdna-cdn.com/wp-content/uploads/2015/04/iris_petal_sepal.png" />
The flowers are classified into 3 types: Iris Setosa, Iris Versicolor and Iris Virginica.
Let's go to the code ;)
The first step is to load the dataset. The files for this dataset are available in the UCI Machine Learning Repository. However, since it is such a widely used dataset, ScikitLearn lets us import it directly from the library.
End of explanation
"""
print(dataset_iris.feature_names)
"""
Explanation: Printing the features:
End of explanation
"""
print(dataset_iris.target_names)
"""
Explanation: Printing the labels:
End of explanation
"""
print(dataset_iris.data)
# In this list, 0 = setosa, 1 = versicolor and 2 = virginica
print(dataset_iris.target)
"""
Explanation: Printing the data:
End of explanation
"""
# Check the types of the features and of the classes
print(type(dataset_iris.data))
print(type(dataset_iris.target))
# Check the shape of the features (first dimension = number of instances, second dimension = number of attributes)
print(dataset_iris.data.shape)
# Check the shape of the labels
print(dataset_iris.target.shape)
"""
Explanation: Before we continue, it is worth pointing out that Scikit-Learn imposes a few requirements for working with the data. This tutorial does not aim to be a detailed study of the library, but it is important to be aware of these requirements in order to understand some examples shown later. They are:
The features and the labels must be stored in separate objects
Both must be numeric
Both must be represented as NumPy arrays
Both must have specific shapes
Let's check this information on the Iris dataset.
End of explanation
"""
X = dataset_iris.data
Y = dataset_iris.target
"""
Explanation: When we import the dataset directly from ScikitLearn, the features and labels already come in separate objects. Just to keep the names short, I will rename them.
End of explanation
"""
import numpy as np
# Choosing the indices that will be removed from the training set to form the test set
test_idx = [0, 50, 100] # instances 0, 50 and 100 of the dataset
# Building the training set
train_target = np.delete(dataset_iris.target, test_idx)
train_data = np.delete(dataset_iris.data, test_idx, axis=0)
# Building the test set
test_target = dataset_iris.target[test_idx]
test_data = dataset_iris.data[test_idx]
print("Size of the original data: ", dataset_iris.data.shape) #np.delete does not modify the original data
print("Size of the training set: ", train_data.shape)
print("Size of the test set: ", test_data.shape)
"""
Explanation: Building and testing a training model
Now that we have our dataset, the next step is to build a machine learning model capable of using it. Before building the model, however, we need to know which kind of model to develop, and for that we need to define our purpose in the training task.
There are several types of tasks within machine learning. As mentioned before, we will work with the classification task. Classification consists of creating a model from data that is already labelled in some way. The resulting model is able to determine which class an instance belongs to based on the data given as input.
In the presentation of the Iris dataset we saw that each instance is labelled with a type (in this case, the species the plant belongs to). So we will treat this problem as a classification problem. There are other tasks within machine learning, such as clustering and grouping, among others. More details on each of them will be presented in the machine learning class.
The next step is to build the model. To do so, we will follow 4 steps:
Step 1: Import the classifier you want to use
Step 2: Instantiate the model
Step 3: Train the model
Step 4: Make predictions for new values
In this presentation we will keep using the Decision Tree model. The reason for using it at this stage is that it is easy to visualize what the model is doing with the data.
For our example, we will train the model on one set of data and then test it on a set of data that was not used for training. To do this, we remove a few instances from the training set and use them later for testing. We call this splitting the data into a training set and a test set (a sketch using scikit-learn's train_test_split follows this explanation). It is easy to see that it makes no sense to test our model on a pattern it already knows, which is why this separation is necessary.
End of explanation
"""
clf = tree.DecisionTreeClassifier()
clf.fit(train_data, train_target)
"""
Explanation: Now that our dataset has been split, let's create the classifier and train it with the training data.
End of explanation
"""
print(clf.predict(test_data))
"""
Explanation: The classifier has been trained; now let's use it to classify the instances of the test set.
End of explanation
"""
print(test_target)
"""
Explanation: Since we are working with supervised learning, we can compare the predictions with the targets we already know for the test set.
End of explanation
"""
from IPython.display import Image
import pydotplus
dot_data = tree.export_graphviz(clf, out_file=None,
feature_names=dataset_iris.feature_names,
class_names=dataset_iris.target_names,
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
Image(graph.create_png(), width=800)
"""
Explanation: Note that, in this case, our classifier had an accuracy of 100%, getting every given instance right (a quick check with sklearn's accuracy_score is sketched right after this explanation). Of course this is just a toy example, and we normally work with accuracies below 100%. It is worth noting, though, that for some tasks, such as image recognition, accuracy rates are very close to 100%.
Visualizing our model
The advantage of working with a decision tree is that we can visualize exactly what the model does. In general terms, a decision tree is a tree that splits the dataset. Each internal node of the tree is "a question" that routes an instance down the tree. The classes sit at the leaf nodes. This type of model will be covered in more detail later in the course.
To do this, we use a snippet of code that renders the generated tree.
End of explanation
"""
print(test_data)
print(test_target)
"""
Explanation: Note that each internal node asks a yes/no question about some feature. For example, at the root node the question is "is petal width less than or equal to 0.8". This means that if the instance I want to classify has a petal width below 0.8, it is classified as setosa. If that is not true, it is routed to another node that examines another feature. This process continues until a leaf node is reached. As an exercise, follow the tree with the table below and classify the test instances by hand.
End of explanation
"""
|
LSSTC-DSFP/LSSTC-DSFP-Sessions | Sessions/Session14/Day2/DeeplearningBlank.ipynb | mit | # this module contains our dataset
!pip install astronn
#this is pytorch, which we will use to build our nn
import torch
#Standards for plotting, math
import matplotlib.pyplot as plt
import numpy as np
#for our objective function
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay
"""
Explanation: Classification with a Multi-layer Perceptron (MLP)
Author: V. Ashley Villar
In this problem set, we will not be implementing neural networks from scratch. Yesterday, you built a perceptron in Python. Multi-layer perceptrons (MLPs) are, as discussed in the lecture, several layers of these perceptrons stacked. Here, we will learn how to use one of the most common modules for building neural networks: Pytorch
End of explanation
"""
from astroNN.datasets import load_galaxy10
from astroNN.datasets.galaxy10 import galaxy10cls_lookup
%matplotlib inline
#helpful functions:
#Load the images and labels as numbers
images, labels_original = load_galaxy10()
#convert numbers to a string
galaxy10cls_lookup(labels_original[0])
"""
Explanation: A few notes on Pytorch syntax
(Many thanks to Vanessa Bohm!!)
Pytorch datatype summary: The model expects a single precision input. You can change the type of a tensor with tensor_name.type(), where tensor_name is the name of your tensor and type is the dtype. For typecasting into single precision floating points, use float(). A numpy array is typecasted with array_name.astype(type). For single precision, the type should be np.float32.
Before we analyze tensors we often want to convert them to numpy arrays with tensor_name.numpy()
If pytorch has been tracking operations that resulted in the current tensor value, you need to detach the tensor from the graph (meaning you want to ignore things like its derivative) before you can transform it into a numpy array: tensor_name.detach(). Scalars can be detached with scalar.item()
Pytorch allows you to easily use your CPU or GPU; however, we are not using this feature. If you tensor is currently on the GPU, you can bring it onto the CPU with tensor_name.cpu()
Problem 1: Understanding the Data
For this problem set, we will use the Galaxy10 dataset made available via the astroNN module. This dataset is made up of 17736 images of galaxies which have been labelled by hand. See this link for more information.
First we will visualize our data.
Problem 1a Show one example of each class as an image.
End of explanation
"""
images_top_two = ...
labels_top_two = ...
"""
Explanation: Problem 2b Make a histogram showing the fraction of each class
Keep only the top two classes (i.e., the classes with the most galaxies)
End of explanation
"""
# This code converts from integer labels to 'one-hot encodings'. What does that term mean?
import torch.nn.functional as F
torch.set_default_dtype(torch.float)
labels_top_two_one_hot = F.one_hot(torch.tensor(labels_top_two - np.min(labels_top_two)).long(), num_classes=2)
images_top_two = torch.tensor(images_top_two).float()
labels_top_two_one_hot = labels_top_two_one_hot.float()
# we're going to flatten the images for our MLP
images_top_two_flat = ...
#Normalize the flux of the images here
images_top_two_flat_normed = ...
"""
Explanation: This next block of code converts the data to a format which is more compatible with our neural network.
End of explanation
"""
from sklearn.model_selection import train_test_split
"""
Explanation: Problem 2c Split the data into a training and test set (66/33 split) using the train_test_split function from sklearn
End of explanation
"""
class MLP(torch.nn.Module):
# this defines the model
def __init__(self, input_size, hidden_size):
super(MLP, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.hiddenlayer = torch.nn.Linear(self.input_size, self.hidden_size)
self.outputlayer = torch.nn.Linear(self.hidden_size, HOW_MANY_OUTPUTS)
# some nonlinear options
self.sigmoid = torch.nn.Sigmoid()
self.softmax = torch.nn.Softmax()
self.relu = torch.nn.ReLU()
def forward(self, x):
layer1 = self.hiddenlayer(x)
activation = self.sigmoid(layer1)
layer2 = self.outputlayer(activation)
output = self.NONLINEAR(layer2)
return output
"""
Explanation: The next cell will outline how one can make a MLP with pytorch.
Problem 3a Talk to a partner about how this code works, line by line. Add another hidden layer which is the same size as the first hidden layer. Choose an appropriate final nonlinear layer for this classification problem. Choose the appropriate number of outputs.
End of explanation
"""
# train the model
def train_model(training_data,training_labels, test_data,test_labels, model):
# define the optimization
criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1,momentum=0.9)
# Increase the number of epochs for your "final" run
for epoch in range(10):
# clear the gradient
optimizer.zero_grad()
# compute the model output
myoutput = model(training_data)
# calculate loss
loss = criterion(myoutput, training_labels)
# credit assignment
loss.backward()
# update model weights
optimizer.step()
# ADD PLOT
"""
Explanation: The next block of code shows how one can train the model (increase the number of epochs for your final run). Note that we use the binary cross-entropy as our objective function and stochastic gradient descent as our optimization method.
Problem 3b Edit the code so that the function plots the training loss and the test loss for each epoch.
End of explanation
"""
model = MLP(np.shape(images_train[0])[0],100)
train_model(images_train, labels_train, images_test, labels_test, model)
"""
Explanation: The next block trains the code, assuming a hidden layer size of 100 neurons.
Problem 3c Change the learning rate lr to minimize the cross entropy score
End of explanation
"""
# evaluate the model
def evaluate_model(data,labels, model):
return(acc)
# evaluate the model
acc = evaluate_model(images_test,labels_test, model)
print('Accuracy: %.3f' % acc)
"""
Explanation: Write a function called evaluate_model which takes the image data, labels and model as input and returns the accuracy as output. You can use the accuracy_score function.
End of explanation
"""
|
manuela98/Emergencias_911_ | Codigo/Documentación.ipynb | gpl-3.0 | %%bash
python descarga.py
"""
Explanation: Installation
For the code written for this project to work correctly, follow these steps:
1. Install the beautifulsoup4 and Requests packages for Python:
+ pip install beautifulsoup4 Requests
2. Install Numpy:
+ sudo pip install numpy
3. Install Ipython:
+ pip install ipython
4. Install R:
+ apt install -y r-base
5. Install Gnuplot:
+ apt -y install gnuplot
Then, to add gnuplot support inside the notebook:
* pip install --upgrade --no-cache-dir git+https://github.com/has2k1/gnuplot_kernel.git@master
6. Clone the repository:
+ git clone https://github.com/manuela98/Emergencias_911_.git
Initialization
Go to the Codigo folder and run descarga.py to download the tzr.csv file, which contains the data we will work with.
End of explanation
"""
#Example
from IPython.display import display, Markdown
import consulta as c
display(Markdown(c.mark(c.Consulta())))
"""
Explanation: For data extraction and handling, a matrix is built with the data needed for the queries; this is done by the Datos_necesarios() function located in consulta.py
Query
To run a query, keep the following requirements in mind:
Write the following lines of code in a cell after this one:
from IPython.display import display, Markdown
import consulta as c
display(Markdown(c.Consulta()))
When you run the cell, a box will appear where you must enter the type of data you want to search by. Immediately afterwards another box will appear where you must enter the value you want to search for. The options are the following:
Tittle : if your search is by emergency type.
Examples: EMS, Fire, Traffic
Description : if your search is by emergency description.
Examples: FALL VICTIM , RESPIRATORY EMERGENCY , VEHICLE ACCIDENT
Date : if your search is by the date on which the call occurred.
Examples: 2015-12-10 , 2015-10-10
Hour : if your search is by the time at which the call occurred.
Examples: 08:07:00 , 08:47:13
If the search was entered correctly, a table with the matches for the given value is displayed. The table provides complete information (title, description, date and time) regardless of which type of data was used for the search.
End of explanation
"""
%%bash
python GenArEst.py
"""
Explanation: How does the program work?
The code written for this class project is implemented in Python and uses the following packages:
+ Requests
+ Beautifulsoup4
+ Webbrowser
+ numpy
+ Ipython
descarga.py
This file is in charge of the web extraction of the database, which is downloaded as a plain-text file named tzr.csv.
The code visits the page http://montcoalert.org/gettingdata/ (our data source), finds the URL of the tzr.csv file offered there and downloads it into the Codigo directory (a rough sketch of this kind of scraping follows this explanation).
consulta.py
This file contains two functions:
+ Datos_necesarios(): loads the text file into a matrix, selects the most relevant data columns for this case and loads them into the matrix on which the queries are run.
+ Consulta(): asks the user for the data type and the search value, then calls Datos_necesarios(), which looks for matches of the entered value in the matrix. It then generates a markdown table with the search results.
Statistics - Data Analysis
After completing the installation and initialization described above (if you are not in the Codigo directory, go to it), run GenArEst.py as shown below.
End of explanation
"""
%%bash
Rscript Estadistica.R # This file contains the code that generates the statistics and the plots.
"""
Explanation: The code in GenArEst.py generates a series of files needed for the descriptive and inferential statistics.
Statistics:
A set of data of interest was selected to compute basic statistics with R (mean, max, min, standard deviation), both by day and by month:
End of explanation
"""
|
mrcinv/moodle-questions | python/example_images.ipynb | gpl-3.0 | %pylab inline
from moodle import *
num_q(-1.2,0.001), multi_q([("12",50),("23",50),("34",-100)])
"""
Explanation: Exercise sample: images
Change this sample according to your needs. Run all the cells, and upload the resulting .xml file to Moodle.
Auxiliary functions
End of explanation
"""
from scipy.interpolate import interp1d
x0 = sort(hstack((array([0,1]),rand(2)/2+0.25)))
y0 = sort(hstack((array([0,1]),rand(2)*abs(x0[1]-x0[2])/2+transpose(x0[1:2]))))
sp = interp1d(x0,y0,kind='cubic')
f = lambda x: cos(pi/2*x)
functions = [(lambda x: (1-x)**1.5,0,1), (lambda x: (1+x)**0.7,-1,0), (lambda x: cos(pi/2*x),4,5)]
random_points = lambda a,b: [randint(1,9)/10*(b-a) + a for i in range(3)] # 3 random points in [a,b]
parameters = [fun + tuple(random_points(fun[1],fun[2])) for fun in functions]
parameters
"""
Explanation: Question parameters
Generate the parameters that appear in the questions.
End of explanation
"""
import io
import base64
def question_text(parameter):
fun, a, b, x0, y0, xi = parameter # parameter contains function, interval boundaries
clf() # clear the plot
t = linspace(0,1)*(b-a) + a
y = fun(t)
plot(t,y)
grid()
xticks(arange(a,b,0.1))
yticks(arange(0,1,0.1))
xlim(a,b)
ylim(min(y),max(y))
strio = io.BytesIO() # plot figure into a string
savefig(strio,format="png")
val = strio.getvalue() # get image string and decode it with base64
img = base64.b64encode(val).decode()
strio.close()
q = """<p>Below is a graph of an unknown function f</p>
<img src="data:image/png;base64,%s" />
<p>What are approximate values of the following numbers (round it on 1 decimal place):</p>
<ul>
<li>f(%0.2f) %s </li>
<li>x, such that f(x)=%0.2f %s</li>
<li>\\(f^{-1}(%0.2f)\\) %s </li>
</ul>""" % (img,x0,num_q(fun(x0),0.05),fun(y0),num_q(y0,0.05),fun(xi),num_q(xi,0.05))
return q
# display the first question
from IPython.display import HTML
HTML(question_text(parameters[0]))
"""
Explanation: Question body
Write the function that generates the text of the question. You can use the following syntax to add different inputs to the question string q:
value of a variable: q = q + str(x)
Python expressions: q = q + str(1+2*x)
answer input field: q = q + num_q(correct_answer, precision)
Note on embedding images
Images can be embedded in the question text as a BASE64-encoded string via an <img src="data:image/png;base64,..."/> tag. To save a matplotlib figure as an encoded string, one has to use an io.BytesIO in-memory bytes stream.
End of explanation
"""
# Write the questions to a file
name = "read_from_graph"
category = 'functions/graph/'
questions = []
for param in parameters:
b = question_text(param)
questions.append(b)
file = open(name + ".xml","w",encoding="utf8")
# Write to Moodle xml file
moodle_xml(name,questions, cloze_question, category = category, iostream = file)
file.close()
print("Questions were saved in " + name + ".xml, that can be imported into Moodle")
"""
Explanation: Write to file
End of explanation
"""
|
karlstroetmann/Formal-Languages | Python/Test-NFA-2-DFA.ipynb | gpl-2.0 | %run NFA-2-DFA.ipynb
"""
Explanation: This notebook is used to test the conversion of non-deterministic <span style="font-variant:small-caps;">Fsm</span>s into deterministic <span style="font-variant:small-caps;">Fsm</span>s.
End of explanation
"""
%run FSM-2-Dot.ipynb
States = { 'q' + str(i) for i in range(8) }
States
Sigma = { 'a', 'b' }
delta = {
('q0', ''): { 'q1', 'q2'},
('q1', 'b'): { 'q3' },
('q2', 'a'): { 'q4' },
('q3', 'a'): { 'q5' },
('q4', 'b'): { 'q6' },
('q5', ''): { 'q7' },
('q6', ''): { 'q7' },
('q7', ''): { 'q0' }
}
"""
Explanation: In order to represent the <span style="font-variant:small-caps;">Fsm</span>s graphically, we use the notebook FSM-2-Dot.ipynb.
End of explanation
"""
nfa44 = States, Sigma, delta, 'q0', { 'q7' }
"""
Explanation: The non-deterministic <span style="font-variant:small-caps;">Fsm</span> defined below is taken from the lecture notes.
End of explanation
"""
nfa2dot(nfa44)
"""
Explanation: The function nfa2dot can be used to render this <span style="font-variant:small-caps;">Fsm</span>.
End of explanation
"""
dfa44 = nfa2dfa(nfa44)
"""
Explanation: This recognizes the same language as the language described by
$$ (a\cdot b + b \cdot a) \cdot (a\cdot b + b \cdot a)^* $$
Let us convert it into a deterministic <span style="font-variant:small-caps;">Fsm</span>:
End of explanation
"""
dot, states2Names = dfa2dot(dfa44)
dot
"""
Explanation: The function dfa2dot can be used to render this <span style="font-variant:small-caps;">Fsm</span>.
End of explanation
"""
states2Names
"""
Explanation: In order to inspect the states of this deterministic <span style="font-variant:small-caps;">Fsm</span> we print the dictionary states2Names.
End of explanation
"""
dfa44
"""
Explanation: We can also print the <span style="font-variant:small-caps;">Fsm</span>.
End of explanation
"""
|
longyangking/ML | Statistics/Distribution.ipynb | lgpl-3.0 | import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
n,p=50,0.1
plt.hist(np.random.binomial(n,p,size=5000))
plt.show()
"""
Explanation: Statistical Distribution
Discrete distributions
Continuous distributions
Sampling (small-sample) distributions
Discrete Distribution
Binomial distribution $B(n,p)$
Hypergeometric distribution
Geometric distribution
Poisson distribution $P(\lambda)$
1.1 Binomial distribution $B(n,p)$
$$ P(X=k) = C_n^k p^k (1-p)^{n-k}, k=0,1,...,n$$
then $ X \sim B(n,p) $.
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
ngood, nbad, nsamp = 90, 10, 50
plt.hist(np.random.hypergeometric(ngood, nbad, nsamp, 5000))
plt.show()
"""
Explanation: 1.2 Hypergeometric distribution
$$ P(X=k) = \frac {C_M^k C_{N-M}^{n-k}} {C_N^n} $$
then $ X \sim$ Hypergeometric distribution with parameters ${N,M,n}$
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(np.random.geometric(p=0.35, size=10000))
plt.show()
"""
Explanation: 1.3 Geometric distribution
$$ P(X=k) = p(1-p)^{k-1}$$
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(np.random.poisson(5, 10000))
plt.show()
"""
Explanation: 1.4 Poisson distribution $P(\lambda)$
$$ P(X=k) = e^{-\lambda} \frac{\lambda^k}{k!}, k = 0,1,2,... $$
then $ X \sim P(\lambda)$, $\lambda>0$
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(np.random.random_sample(1000))
plt.show()
"""
Explanation: Continuous distributions
Uniform distribution $R(a,b)$
Exponential distribution $E(\lambda)$
Normal distribution $N(\mu,\sigma^2)$
2.1 Uniform distribution $R(a,b)$
$$ f(x)=\begin{cases} \dfrac{1}{b-a}, & a<x<b \\ 0, & \text{otherwise} \end{cases} $$
then $X \sim R(a,b)$
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(np.random.exponential(scale=1.0, size=1000))
plt.show()
"""
Explanation: 2.2 Exponential distribution $E(\lambda)$
$$ f(x)=\begin{cases} \lambda e^{-\lambda x}, & x>0 \\ 0, & \text{otherwise} \end{cases} $$
then $X \sim E(\lambda)$
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(np.random.normal(size=4000))
plt.show()
"""
Explanation: 2.3 Normal distribution $N(\mu,\sigma^2)$
$$ f(x) = \frac{1}{\sqrt{2\pi}\,\sigma} e^{-\frac{(x-\mu)^2}{2\sigma^2}} $$
then $X \sim N(\mu,\sigma^2)$.
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(np.random.chisquare(3,1000))
plt.show()
"""
Explanation: Sampling (small-sample) distributions
$\chi^2$ distribution $\chi^2(n)$
t distribution $t(n)$
F distribution $F(m,n)$
3.1 $\chi^2$ distribution $\chi^2(n)$
Given independent $X_i \sim N(0,1)$, $i=1,\dots,n$,
$$ Y = \sum_{i=1}^{n} X_i^2 \sim \chi^2(n) $$
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(np.random.standard_t(2,50))
plt.show()
"""
Explanation: 3.2 t distribution $t(n)$
Given $X \sim N(0,1)$, $Y \sim \chi^2(n)$,
$$ T \hat{=} \frac{X}{\sqrt{\frac{Y}{n}}} \sim t(n)$$
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(np.random.f(4,10,5000))
plt.show()
"""
Explanation: 3.3 F distribution $F(m,n)$
Given $X \sim \chi^2(m)$, $Y \sim \chi^2(n)$,
$$ F \hat{=} \frac{\frac{X}{m}}{\frac{Y}{n}} \sim F(m,n)$$
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/cnrm-cerfacs/cmip6/models/sandbox-1/toplevel.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cnrm-cerfacs', 'sandbox-1', 'toplevel')
"""
Explanation: ES-DOC CMIP6 Model Properties - Toplevel
MIP Era: CMIP6
Institute: CNRM-CERFACS
Source ID: SANDBOX-1
Sub-Topics: Radiative Forcings.
Properties: 85 (42 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:52
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Flux Correction
3. Key Properties --> Genealogy
4. Key Properties --> Software Properties
5. Key Properties --> Coupling
6. Key Properties --> Tuning Applied
7. Key Properties --> Conservation --> Heat
8. Key Properties --> Conservation --> Fresh Water
9. Key Properties --> Conservation --> Salt
10. Key Properties --> Conservation --> Momentum
11. Radiative Forcings
12. Radiative Forcings --> Greenhouse Gases --> CO2
13. Radiative Forcings --> Greenhouse Gases --> CH4
14. Radiative Forcings --> Greenhouse Gases --> N2O
15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
17. Radiative Forcings --> Greenhouse Gases --> CFC
18. Radiative Forcings --> Aerosols --> SO4
19. Radiative Forcings --> Aerosols --> Black Carbon
20. Radiative Forcings --> Aerosols --> Organic Carbon
21. Radiative Forcings --> Aerosols --> Nitrate
22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
24. Radiative Forcings --> Aerosols --> Dust
25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
27. Radiative Forcings --> Aerosols --> Sea Salt
28. Radiative Forcings --> Other --> Land Use
29. Radiative Forcings --> Other --> Solar
1. Key Properties
Key properties of the model
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Top level overview of coupled model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of coupled model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Flux Correction
Flux correction properties of the model
2.1. Details
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how flux corrections are applied in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Genealogy
Genealogy and history of the model
3.1. Year Released
Is Required: TRUE Type: STRING Cardinality: 1.1
Year the model was released
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. CMIP3 Parent
Is Required: FALSE Type: STRING Cardinality: 0.1
CMIP3 parent if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. CMIP5 Parent
Is Required: FALSE Type: STRING Cardinality: 0.1
CMIP5 parent if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.4. Previous Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Previously known as
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of model
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.4. Components Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how model realms are structured into independent software components (coupled via a coupler) and internal software components.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 4.5. Coupler
Is Required: FALSE Type: ENUM Cardinality: 0.1
Overarching coupling framework for model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Coupling
**
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of coupling in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.2. Atmosphere Double Flux
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 5.3. Atmosphere Fluxes Calculation Grid
Is Required: FALSE Type: ENUM Cardinality: 0.1
Where are the air-sea fluxes calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.4. Atmosphere Relative Winds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Tuning Applied
Tuning methodology for model
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics/diagnostics of the global mean state used in tuning model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics/diagnostics of mean state (e.g. THC, AABW, regional means, etc.) used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics/diagnostics used in tuning model/component (such as 20th century)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.5. Energy Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.6. Fresh Water Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Conservation --> Heat
Global heat conservation properties of the model
7.1. Global
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how heat is conserved globally
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Atmos Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the atmosphere/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Atmos Land Interface
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how heat is conserved at the atmosphere/land coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.4. Atmos Sea-ice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the atmosphere/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.5. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.6. Land Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the land/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation --> Fresh Water
Global fresh water conservation properties of the model
8.1. Global
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how fresh_water is conserved globally
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Atmos Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh_water is conserved at the atmosphere/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Atmos Land Interface
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how fresh water is conserved at the atmosphere/land coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Atmos Sea-ice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh water is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.6. Runoff
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how runoff is distributed and conserved
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.7. Iceberg Calving
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how iceberg calving is modeled and conserved
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.8. Endoreic Basins
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how endoreic basins (no ocean access) are treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.9. Snow Accumulation
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how snow accumulation over land and over sea-ice is treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Key Properties --> Conservation --> Salt
Global salt conservation properties of the model
9.1. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how salt is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10. Key Properties --> Conservation --> Momentum
Global momentum conservation properties of the model
10.1. Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how momentum is conserved in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Radiative Forcings
Radiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5)
11.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of radiative forcings (GHG and aerosols) implementation in model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Carbon dioxide forcing
12.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Methane forcing
13.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Nitrous oxide forcing
14.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Tropospheric ozone forcing
15.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Stratospheric ozone forcing
16.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Ozone-depleting and non-ozone-depleting fluorinated gases forcing
17.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Equivalence Concentration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Details of any equivalence concentrations used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiative Forcings --> Aerosols --> SO4
SO4 aerosol forcing
18.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiative Forcings --> Aerosols --> Black Carbon
Black carbon aerosol forcing
19.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Organic carbon aerosol forcing
20.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiative Forcings --> Aerosols --> Nitrate
Nitrate forcing
21.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Cloud albedo effect forcing (RFaci)
22.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22.2. Aerosol Effect On Ice Clouds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are radiative effects of aerosols on ice clouds represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Cloud lifetime effect forcing (ERFaci)
23.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.2. Aerosol Effect On Ice Clouds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are radiative effects of aerosols on ice clouds represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.3. RFaci From Sulfate Only
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative forcing from aerosol cloud interactions from sulfate aerosol only?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiative Forcings --> Aerosols --> Dust
Dust forcing
24.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Tropospheric volcanic forcing
25.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Historical Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in historical simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Future Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in future simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Stratospheric volcanic forcing
26.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26.2. Historical Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in historical simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26.3. Future Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in future simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiative Forcings --> Aerosols --> Sea Salt
Sea salt forcing
27.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiative Forcings --> Other --> Land Use
Land use forcing
28.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28.2. Crop Change Only
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is land use change represented via crop change only?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 29. Radiative Forcings --> Other --> Solar
Solar forcing
29.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How solar forcing is provided
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
|
trangel/Insight-Data-Science | general-docs/.ipynb_checkpoints/python_sql_dev_setups-checkpoint.ipynb | gpl-3.0 | ## Python packages - you may have to pip install sqlalchemy, sqlalchemy_utils, and psycopg2.
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
import psycopg2
import pandas as pd
"""
Explanation: Dev Setups -- Connecting Python and SQL
The purpose of this IPython notebook is to demonstrate the usefulness of connecting python to a relational database by using a python toolkit called SQLAlchemy.
First off, what is a relational database?
Basically, it is a way to store information so that it can be queried and retrieved efficiently.
MySQL and PostgreSQL are examples of relational databases. For the purposes of an Insight project, you can use either one.
Why would you use a relational database instead of a csv or two?
A few reasons:
They scale easily
They are easy to query
It’s possible to do transactions in those cases where you need to write to a database, not just read from it
Everyone in industry uses them, so you should get familiar with them, too.
What does a relational database look like?
We can take a look. First we need to set up a few things. The first thing we want to do is to get a PostgreSQL server up and running. Go to http://postgresapp.com/ and follow the three steps listed in the Quick Installation Guide. (If you aren't running a Mac, you can download PostgreSQL at http://www.postgresql.org/)
-- you can also use homebrew, but your path will change below --
If you are running Linux a Fellow just posted install directions here: https://gist.github.com/mskoh52/a01d1af3acae43c2c341101a28e504be
We'll come back to PostgreSQL in a moment. First, we'll set up SQLAlchemy. Go ahead and try to implement the following.
End of explanation
"""
#In Python: Define a database name (we're using a dataset on births, so I call it
# birth_db), and your username for your computer (CHANGE IT BELOW).
dbname = 'birth_db'
username = 'rangel'
"""
Explanation: If working in an anaconda environment, we recommend using their install
conda install psycopg2
If you have trouble installing psycopg2 and get the error "pg_config executable not found", try adding "/Applications/Postgres.app/Contents/Versions/9.6/bin" to your PATH by typing the following in your terminal (you may have to check your version number):
export PATH="/Applications/Postgres.app/Contents/Versions/9.6/bin:$PATH"
Then try installing again:
pip install psycopg2
End of explanation
"""
## 'engine' is a connection to a database
## Here, we're using postgres, but sqlalchemy can connect to other things too.
engine = create_engine('postgresql://%s@localhost/%s'%(username,dbname))  # 'postgresql' is the canonical SQLAlchemy dialect name
print (engine.url)
## create a database (if it doesn't exist)
if not database_exists(engine.url):
create_database(engine.url)
print(database_exists(engine.url))
# read a database from CSV and load it into a pandas dataframe
birth_data = pd.read_csv('births2012_downsampled.csv', index_col=0)
## insert data into database from Python (proof of concept - this won't be useful for big data, of course)
birth_data.to_sql('birth_data_table', engine, if_exists='replace')
"""
Explanation: Start your postgresql server
There are multiple ways to launch a postgres server:
1) Launching Postres.app from LaunchPad will automatically start a server. In Mac OS, you should see an elephant icon in the upper right corner.
2) Launch from the terminal with the following command (CHANGE USER NAME):<br>
postgres -D /Users/rockson/Library/Application\ Support/Postgres/var-9.6
3) Have launchd start postgresql at login:<br>
ln -sfv /usr/local/opt/postgresql/*.plist ~/Library/LaunchAgents
Then to load postgresql now: <br>
launchctl load ~/Library/LaunchAgents/homebrew.mxcl.postgresql.plist
Create a database
End of explanation
"""
## Now try the same queries, but in python!
# connect:
con = None
con = psycopg2.connect(database = dbname, user = username)
# query:
sql_query = """
SELECT * FROM birth_data_table WHERE delivery_method='Cesarean';
"""
birth_data_from_sql = pd.read_sql_query(sql_query,con)
birth_data_from_sql.head()
"""
Explanation: The above line (to_sql) is doing a lot of heavy lifting. It's reading a dataframe, it's creating a table, and adding the data to the table. So SQLAlchemy is quite useful!
How this works outside of python:
open up the PostgreSQL app, click on the "Open psql" button in the bottom right corner, <br>
or alternatively type <br>
psql -h localhost
into the command line
Connect to the "birth_db" database we created
\c birth_db
You should see something like the following
You are now connected to database "birth_db" as user "rockson".
Then try the following query:
SELECT * FROM birth_data_table;
Note that the semi-colon indicates an end-of-statement.
You can see the table we created! But it's kinda ugly and hard to read.
Try a few other sample queries. Before you type in each one, ask yourself what you think the output will look like:
SELECT * FROM birth_data_table WHERE infant_sex='M';
SELECT COUNT(infant_sex) FROM birth_data_table WHERE infant_sex='M';
SELECT COUNT(gestation_weeks), infant_sex FROM birth_data_table WHERE infant_sex = 'M' GROUP BY gestation_weeks, infant_sex;
SELECT gestation_weeks, COUNT(gestation_weeks) FROM birth_data_table WHERE infant_sex = 'M' GROUP BY gestation_weeks;
End of explanation
"""
import time
t0 = time.time()
birth_data_from_sql = pd.read_sql_query(sql_query,con)
t1 = time.time()
total = t1-t0
print (total)
birth_data_from_sql.head()
birth_data = pd.read_csv('births2012_downsampled.csv', index_col=0)
t0 = time.time()
birth_data=birth_data.loc[(birth_data['delivery_method'] == 'Cesarean')]
t1 = time.time()
total = t1-t0
print (total)
birth_data.head()
"""
Explanation: Is reading from a SQL database faster than from a Pandas dataframe? Probably not for the amount of data you can fit on your machine.
End of explanation
"""
|
bukosabino/btctrading | XGBoost_next_row.ipynb | mit | # get_data.get('data/datas.csv', period=settings.PERIOD, market=settings.MARKET)
"""
Explanation: Get Data
API: http://bitcoincharts.com/charts
period = ['1-min', '5-min', '15-min', '30-min', 'Hourly', '2-hour', '6-hour', '12-hour', 'Daily', 'Weekly']
market = ['krakenEUR', 'bitstampUSD'] -> list of markets: https://bitcoincharts.com/charts/volumepie/
End of explanation
"""
df = pd.read_csv('data/datas.csv', sep=',')
# add next row
last_timestamp = df['Timestamp'].iloc[-1]
if settings.PERIOD == 'Hourly':
next_timestamp = last_timestamp + 3600
df_next = pd.DataFrame([next_timestamp], columns=['Timestamp'])
df = df.append(df_next, ignore_index=True)
df.iloc[-1] = df.iloc[-1].fillna(1)
print('Number of rows: {}, Number of columns: {}'.format(*df.shape))
"""
Explanation: Load Data
End of explanation
"""
df = utils.dropna(df)
print('Number of rows: {}, Number of columns: {}'.format(*df.shape))
"""
Explanation: Preprocessing
End of explanation
"""
df['Target'] = 0 # 'KEEP'
df.loc[df.Open + (df.Open * settings.PERCENT_UP) < df.Close, 'Target'] = 1 # 'UP'
df.loc[df.Open - (df.Open * settings.PERCENT_DOWN) > df.Close, 'Target'] = 2 # 'DOWN'
print('Number of rows: {}, Number of columns: {}'.format(*df.shape))
print('Number of UP rows: {}, Number of DOWN rows: {}'.format(len(df[df.Target == 1]), len(df[df.Target == 2])))
"""
Explanation: Transformation
Create column target with class [UP, KEEP, DOWN]
End of explanation
"""
df['Date'] = df['Timestamp'].apply(utils.timestamptodate)
df['Date'] = pd.to_datetime(df['Date'])
df['Year'] = df['Date'].dt.year
df['Month'] = df['Date'].dt.month
df['Week'] = df['Date'].dt.weekofyear
df['Weekday'] = df['Date'].dt.weekday
df['Day'] = df['Date'].dt.day
df['Hour'] = df['Date'].dt.hour
# extra dates
# df["yearmonth"] = df["Date"].dt.year*100 + df["Date"].dt.month
# df["yearweek"] = df["Date"].dt.year*100 + df["Date"].dt.weekofyear
# df["yearweekday"] = df["Date"].dt.year*10 + df["Date"].dt.weekday
# shift
cols = ['Open', 'High', 'Low', 'Close', 'Volume_BTC', 'Volume_Currency', 'Weighted_Price']
for col in cols:
df[col] = df[col].shift(1)
df = df.dropna()
df['High-low'] = df['High'] - df['Low']
df['Close-open'] = df['Close'] - df['Open']
df['Up_or_Down'] = 0 # 'UP' or 'DOWN' if diff > settings.PERCENT_UP
df.loc[( df.Open + (df.Open * settings.PERCENT_UP) ) < df.Close, 'Up_or_Down'] = 1 # 'UP'
df.loc[( df.Open - (df.Open * settings.PERCENT_DOWN) ) > df.Close, 'Up_or_Down'] = 2 # 'DOWN'
df['Up_or_Down_2'] = 0 # 'UP' or 'DOWN' if diff > settings.PERCENT_UP * 2
df.loc[df.Open + (df.Open * settings.PERCENT_UP * 2 ) < df.Close, 'Up_or_Down_2'] = 1 # 'UP'
df.loc[df.Open - (df.Open * settings.PERCENT_DOWN * 2) > df.Close, 'Up_or_Down_2'] = 2 # 'DOWN'
df['Up_or_Down_3'] = 0 # 'UP' or 'DOWN' if diff > 0
df.loc[df.Open < df.Close, 'Up_or_Down_3'] = 1 # 'UP'
df.loc[df.Open > df.Close, 'Up_or_Down_3'] = 2 # 'DOWN'
df['Up_or_Down_4'] = 0 # 'UP' or 'DOWN' if diff > settings.PERCENT_UP / 2
df.loc[df.Open + (df.Open * settings.PERCENT_UP / 2 ) < df.Close, 'Up_or_Down_4'] = 1 # 'UP'
df.loc[df.Open - (df.Open * settings.PERCENT_DOWN / 2) > df.Close, 'Up_or_Down_4'] = 2 # 'DOWN'
# Fundamental analysis
# daily return
df['Daily_return'] = (df['Close'] / df['Close'].shift(1)) - 1
df['Daily_return_100'] = ((df['Close'] / df['Close'].shift(1)) - 1) * 100
# cumulative return
df = df.dropna()
df['Cumulative_return'] = (df['Close'] / df['Close'].iloc[0]) - 1
df['Cumulative_return_100'] = ((df['Close'] / df['Close'].iloc[0]) - 1) * 100
# TODO: cumulative return week, month, year...
print('Number of rows: {}, Number of columns: {}'.format(*df.shape))
"""
Explanation: Create columns from Timestamp to Date, Year, Month, Hour, etc.
Feature Engineering
End of explanation
"""
# Accumulation/Distribution index
df['Acc_Dist_Roc_BTC'] = acc_dist_roc(df, 'Volume_BTC', 2)
df['Acc_Dist_Roc_Currency'] = acc_dist_roc(df, 'Volume_Currency', 2)
df['Acc_Dist_BTC'] = acc_dist_index(df, 'Volume_BTC')
df['Acc_Dist_Currency'] = acc_dist_index(df, 'Volume_Currency')
# Chaikin Money Flow
df['Chaikin_Money_Flow_1_BTC'] = chaikin_money_flow1(df, 'Volume_BTC')
df['Chaikin_Money_Flow_2_BTC'] = chaikin_money_flow2(df, 'Volume_BTC', 20)
df['Chaikin_Money_Flow_3_BTC'] = chaikin_money_flow3(df, 'Volume_BTC', 20)
df['Chaikin_Money_Flow_1_Currency'] = chaikin_money_flow1(df, 'Volume_Currency')
df['Chaikin_Money_Flow_2_Currency'] = chaikin_money_flow2(df, 'Volume_Currency', 20)
df['Chaikin_Money_Flow_3_Currency'] = chaikin_money_flow3(df, 'Volume_Currency', 20)
# Money Flow Index
df['Money_Flow_BTC'] = money_flow_index(df, 'Volume_BTC', 14)
df['Money_Flow_Currency'] = money_flow_index(df, 'Volume_Currency', 14)
# On-balance volume
df['OBV_BTC'] = on_balance_volume(df, 'Volume_BTC')
df['OBV_BTC_mean'] = on_balance_volume_mean(df, 'Volume_BTC')
df['OBV_Currency'] = on_balance_volume(df, 'Volume_Currency')
df['OBV_Currency_mean'] = on_balance_volume_mean(df, 'Volume_Currency')
# Force Index
df['Force_Index_BTC'] = force(df, 'Volume_BTC', 2)
df['Force_Index_Currency'] = force(df, 'Volume_Currency', 2)
# delete intermediate columns
df.drop('OBV', axis=1, inplace=True)
"""
Explanation: Technical Analysis
https://en.wikipedia.org/wiki/Technical_analysis
Volume-based indicators
End of explanation
"""
# Moving Average Convergence Divergence
df[['MACD', 'MACD_sign', 'MACD_diff']] = macd(df, 12, 26, 9)
# Average directional movement index
df[['ADX', 'ADX_pos', 'ADX_neg']] = adx(df, 14)
# Vortex indicator
df[['Vortex_pos', 'Vortex_neg']] = vortex(df, 14)
"""
Explanation: Trend indicators
End of explanation
"""
df['RSI'] = rsi(df, 14)
"""
for c in df.columns:
    print(str(c) + ' - ' + str(df[c].isnull().sum()))
"""
"""
Explanation: Momentum Indicators
End of explanation
"""
# Momentum
for idx in range(9):
m = idx+2
df['Momentum_'+str(m)] = ((df['Close'] / df['Close'].shift(m)) - 1)
# Rollings
for idx in range(9):
m = idx+2
df['Rolling_mean_'+str(m)] = (df.set_index('Date')['Close'].rolling(window=m).mean()).values
df['Rolling_std_'+str(m)] = (df.set_index('Date')['Close'].rolling(window=m).std()).values
df['Rolling_cov_'+str(m)] = (df.set_index('Date')['Close'].rolling(window=m).cov()).values
# Bollinger bands
for idx in range(9):
m = idx+2
df['Bollinger_band_mean_'+str(m)+'_max'] = df['Rolling_mean_'+str(m)] + (2*df['Rolling_std_'+str(m)])
df['Bollinger_band_mean_'+str(m)+'_min'] = df['Rolling_mean_'+str(m)] - (2*df['Rolling_std_'+str(m)])
print('Number of rows: {}, Number of columns: {}'.format(*df.shape))
df = df.dropna()
print('Number of rows: {}, Number of columns: {}'.format(*df.shape))
"""
Explanation: Price-based indicators
End of explanation
"""
train, test = utils.split_df(df)
excl = ['Target', 'Date', 'Timestamp']
cols = [c for c in df.columns if c not in excl]
"""
Explanation: Split
End of explanation
"""
y_train = train['Target']
y_mean = np.mean(y_train)
xgb_params = {
'n_trees': 800,
'eta': 0.0045,
'max_depth': 20,
'subsample': 0.95,
'colsample_bytree': 0.95,
'colsample_bylevel': 0.95,
'objective': 'multi:softmax',
'num_class' : 3,
'eval_metric': 'mlogloss', # 'merror', # 'rmse',
'base_score': 0,
'silent': 1
}
dtrain = xgb.DMatrix(train[cols], y_train)
dtest = xgb.DMatrix(test[cols])
cv_result = xgb.cv(xgb_params, dtrain)
# xgboost, cross-validation
cv_result = xgb.cv(xgb_params,
dtrain,
num_boost_round=5000,
early_stopping_rounds=50,
verbose_eval=50,
show_stdv=False
)
num_boost_rounds = len(cv_result)
# num_boost_rounds = 1000
print(num_boost_rounds)
# train
model = xgb.train(xgb_params, dtrain, num_boost_round=num_boost_rounds)
# predict
y_pred = model.predict(dtest)
y_true = test['Target']
# the appended "next row" sits at the end of the test split, so its forecast is the last prediction
prediction_value = y_pred[-1]
if prediction_value == 1.0:
print("Prediction: UP")
elif prediction_value == 2.0:
print("Prediction: DOWN")
else: # 0.0
print("Prediction: KEEP")
print "\n \n \n \n \n \n ********** WEIGHT ************"
importance = model.get_fscore()
importance = sorted(importance.items(), key=operator.itemgetter(1))
for i in importance:
print i
print "\n \n \n \n \n \n ********** GAIN ************"
importance = model.get_score(fmap='', importance_type='gain')
importance = sorted(importance.items(), key=operator.itemgetter(1))
for i in importance:
print i
"""
Explanation: xgboost
End of explanation
"""
|
roatienza/Deep-Learning-Experiments | versions/2022/autoencoder/python/colorize_pytorch_demo.ipynb | mit | import torch
import torchvision
import wandb
import time
from torch import nn
from einops import rearrange, reduce
from argparse import ArgumentParser
from pytorch_lightning import LightningModule, Trainer, Callback
from pytorch_lightning.loggers import WandbLogger
from torch.optim import Adam
from torch.optim.lr_scheduler import CosineAnnealingLR
"""
Explanation: Colorization AutoEncoder PyTorch Demo using CIFAR10
In this demo, we build a simple colorization autoencoder using PyTorch.
End of explanation
"""
class Encoder(nn.Module):
def __init__(self, n_features=1, kernel_size=3, n_filters=64, feature_dim=256):
super().__init__()
self.conv1 = nn.Conv2d(n_features, n_filters, kernel_size=kernel_size, stride=2)
self.conv2 = nn.Conv2d(n_filters, n_filters*2, kernel_size=kernel_size, stride=2)
self.conv3 = nn.Conv2d(n_filters*2, n_filters*4, kernel_size=kernel_size, stride=2)
self.fc1 = nn.Linear(2304, feature_dim)
def forward(self, x):
y = nn.ReLU()(self.conv1(x))
y = nn.ReLU()(self.conv2(y))
y = nn.ReLU()(self.conv3(y))
y = rearrange(y, 'b c h w -> b (c h w)')
y = self.fc1(y)
return y
# use this to get the correct input shape for fc1.
encoder = Encoder(n_features=1)
x = torch.Tensor(1, 1, 32, 32)
h = encoder(x)
print("h.shape:", h.shape)
"""
Explanation: CNN Encoder using PyTorch
We use 3 CNN layers to encode the grayscale input image. We use stride of 2 to reduce the feature map size. The last MLP layer resizes the flattened feature map to the target latent vector size. We use more filters and a much bigger latent vector size of 256 to encode more information.
End of explanation
"""
class Decoder(nn.Module):
def __init__(self, kernel_size=3, n_filters=256, feature_dim=256, output_size=32, output_channels=3):
super().__init__()
self.init_size = output_size // 2**2
self.fc1 = nn.Linear(feature_dim, self.init_size**2 * n_filters)
# output size of conv2dtranspose is (h-1)*2 + 1 + (kernel_size - 1)
self.conv1 = nn.ConvTranspose2d(n_filters, n_filters//2, kernel_size=kernel_size, stride=2, padding=1)
self.conv2 = nn.ConvTranspose2d(n_filters//2, n_filters//4, kernel_size=kernel_size, stride=2, padding=1)
self.conv3 = nn.ConvTranspose2d(n_filters//4, n_filters//4, kernel_size=kernel_size, padding=1)
self.conv4 = nn.ConvTranspose2d(n_filters//4, output_channels, kernel_size=kernel_size+1)
def forward(self, x):
B, _ = x.shape
y = self.fc1(x)
y = rearrange(y, 'b (c h w) -> b c h w', b=B, h=self.init_size, w=self.init_size)
y = nn.ReLU()(self.conv1(y))
y = nn.ReLU()(self.conv2(y))
y = nn.ReLU()(self.conv3(y))
y = nn.Sigmoid()(self.conv4(y))
return y
decoder = Decoder()
x_tilde = decoder(h)
print("x_tilde.shape:", x_tilde.shape)
"""
Explanation: CNN Decoder using PyTorch
A decoder reconstructs the input image from the latent space. The architecture is similar to the encoder but inverted. The latent vector is first resized by an MLP layer so that it is suitable for a convolutional layer. We then use strided transposed convolutional layers to upsample the feature map until the desired image size is reached. The target image is the colorized version of the input image.
End of explanation
"""
def gray_collate_fn(batch):
x, _ = zip(*batch)
x = torch.stack(x, dim=0)
xn = reduce(x,"b c h w -> b 1 h w", 'mean')
return xn, x
class LitColorizeCIFAR10Model(LightningModule):
def __init__(self, feature_dim=256, lr=0.001, batch_size=64,
num_workers=4, max_epochs=30, **kwargs):
super().__init__()
self.save_hyperparameters()
self.encoder = Encoder(feature_dim=feature_dim)
self.decoder = Decoder(feature_dim=feature_dim)
self.loss = nn.MSELoss()
def forward(self, x):
h = self.encoder(x)
x_tilde = self.decoder(h)
return x_tilde
# this is called during fit()
def training_step(self, batch, batch_idx):
x_in, x = batch
x_tilde = self.forward(x_in)
loss = self.loss(x_tilde, x)
return {"loss": loss}
# calls to self.log() are recorded in wandb
def training_epoch_end(self, outputs):
avg_loss = torch.stack([x["loss"] for x in outputs]).mean()
self.log("train_loss", avg_loss, on_epoch=True)
    # this is called during test()/validate(), once per batch
def test_step(self, batch, batch_idx):
x_in, x = batch
x_tilde = self.forward(x_in)
loss = self.loss(x_tilde, x)
return {"x_in" : x_in, "x": x, "x_tilde" : x_tilde, "test_loss" : loss,}
    # this is called at the end of the test epoch
def test_epoch_end(self, outputs):
avg_loss = torch.stack([x["test_loss"] for x in outputs]).mean()
self.log("test_loss", avg_loss, on_epoch=True, prog_bar=True)
# validation is the same as test
def validation_step(self, batch, batch_idx):
return self.test_step(batch, batch_idx)
def validation_epoch_end(self, outputs):
return self.test_epoch_end(outputs)
# we use Adam optimizer
def configure_optimizers(self):
optimizer = Adam(self.parameters(), lr=self.hparams.lr)
# this decays the learning rate to 0 after max_epochs using cosine annealing
scheduler = CosineAnnealingLR(optimizer, T_max=self.hparams.max_epochs)
return [optimizer], [scheduler],
    # this is called after model instantiation to initialize the datasets and dataloaders
def setup(self, stage=None):
self.train_dataloader()
self.test_dataloader()
    # build train and test dataloaders using the CIFAR10 dataset
# we use simple ToTensor transform
def train_dataloader(self):
return torch.utils.data.DataLoader(
torchvision.datasets.CIFAR10(
"./data", train=True, download=True,
transform=torchvision.transforms.ToTensor()
),
batch_size=self.hparams.batch_size,
shuffle=True,
num_workers=self.hparams.num_workers,
pin_memory=True,
collate_fn=gray_collate_fn
)
def test_dataloader(self):
return torch.utils.data.DataLoader(
torchvision.datasets.CIFAR10(
"./data", train=False, download=True,
transform=torchvision.transforms.ToTensor()
),
batch_size=self.hparams.batch_size,
shuffle=False,
num_workers=self.hparams.num_workers,
pin_memory=True,
collate_fn=gray_collate_fn
)
def val_dataloader(self):
return self.test_dataloader()
"""
Explanation: PyTorch Lightning Colorization AutoEncoder
In the colorization autoencoder, the encoder extracts features from the input image and the decoder reconstructs the input image from the latent space. The decoder adds color. The decoder's last layer has 3 output channels corresponding to RGB.
We use gray_collate_fn to generate gray input images from the RGB images.
End of explanation
"""
def get_args():
parser = ArgumentParser(description="PyTorch Lightning Colorization AE CIFAR10 Example")
parser.add_argument("--max-epochs", type=int, default=30, help="num epochs")
parser.add_argument("--batch-size", type=int, default=64, help="batch size")
parser.add_argument("--lr", type=float, default=0.001, help="learning rate")
parser.add_argument("--feature-dim", type=int, default=256, help="ae feature dimension")
parser.add_argument("--devices", default=1)
parser.add_argument("--accelerator", default='gpu')
parser.add_argument("--num-workers", type=int, default=4, help="num workers")
args = parser.parse_args("")
return args
"""
Explanation: Arguments
Similar to the MNIST AE but we use a bigger latent vector size of 256 given that the colorization task needs more feature information from the input image.
End of explanation
"""
class WandbCallback(Callback):
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        # log a few sample images (batch_size // 4) from the first validation batch
if batch_idx == 0:
x, c = batch
n = pl_module.hparams.batch_size // 4
outputs = outputs["x_tilde"]
columns = ['gt color', 'gray', 'colorized']
data = [[wandb.Image(c_i), wandb.Image(x_i), wandb.Image(x_tilde_i)] for c_i, x_i, x_tilde_i in list(zip(c[:n], x[:n], outputs[:n]))]
wandb_logger.log_table(key="cifar10-colorize-ae", columns=columns, data=data)
"""
Explanation: Weights and Biases Callback
The callback logs train and validation metrics to wandb. It also logs sample predictions. This is similar to our WandbCallback example for MNIST.
End of explanation
"""
if __name__ == "__main__":
args = get_args()
ae = LitColorizeCIFAR10Model(feature_dim=args.feature_dim, lr=args.lr,
batch_size=args.batch_size, num_workers=args.num_workers,
max_epochs=args.max_epochs)
#ae.setup()
wandb_logger = WandbLogger(project="colorize-cifar10")
start_time = time.time()
trainer = Trainer(accelerator=args.accelerator,
devices=args.devices,
max_epochs=args.max_epochs,
logger=wandb_logger,
callbacks=[WandbCallback()])
trainer.fit(ae)
elapsed_time = time.time() - start_time
print("Elapsed time: {}".format(elapsed_time))
wandb.finish()
"""
Explanation: Training an AE
We train the autoencoder on the CIFAR10 dataset.
The results can be viewed on wandb.
End of explanation
"""
|
voyageth/udacity-Deep_Learning_Foundations_Nanodegree | sentiment-rnn/Sentiment_RNN.ipynb | mit | import numpy as np
import tensorflow as tf
with open('../sentiment-network/reviews.txt', 'r') as f:
reviews = f.read()
with open('../sentiment-network/labels.txt', 'r') as f:
labels = f.read()
reviews[:2000]
"""
Explanation: Sentiment Analysis with an RNN
In this notebook, you'll implement a recurrent neural network that performs sentiment analysis. Using an RNN rather than a feedforward network is more accurate since we can include information about the sequence of words. Here we'll use a dataset of movie reviews, accompanied by labels.
The architecture for this network is shown below.
<img src="assets/network_diagram.png" width=400px>
Here, we'll pass in words to an embedding layer. We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the word2vec lesson. You can actually train up an embedding with word2vec and use it here. But it's good enough to just have an embedding layer and let the network learn the embedding table on its own.
From the embedding layer, the new representations will be passed to LSTM cells. These will add recurrent connections to the network so we can include information about the sequence of words in the data. Finally, the LSTM cells will go to a sigmoid output layer here. We're using the sigmoid because we're trying to predict if this text has positive or negative sentiment. The output layer will just be a single unit then, with a sigmoid activation function.
We don't care about the sigmoid outputs except for the very last one; the rest can be ignored. We'll calculate the cost from the output of the last step and the training label.
End of explanation
"""
from string import punctuation
all_text = ''.join([c for c in reviews if c not in punctuation])
reviews = all_text.split('\n')
all_text = ' '.join(reviews)
words = all_text.split()
all_text[:2000]
words[:100]
"""
Explanation: Data preprocessing
The first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit.
You can see an example of the reviews data above. We'll want to get rid of those periods. Also, you might notice that the reviews are delimited with newlines \n. To deal with those, I'm going to split the text into each review using \n as the delimiter. Then I can combine all the reviews back together into one big string.
First, let's remove all punctuation. Then get all the text without the newlines and split it into individual words.
End of explanation
"""
# Create your dictionary that maps vocab words to integers here
word_set = set()
for word in words:
word_set.add(word)
vocab_to_int = {word: i for i, word in enumerate(word_set, 1)}  # start the integers at 1; 0 is reserved for padding
# Convert the reviews to integers, same shape as reviews list, but with integers
reviews_ints = []
for review in reviews:
review_ints = [vocab_to_int[word] for word in review.split()]
reviews_ints.append(review_ints)
"""
Explanation: Encoding the words
The embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network.
Exercise: Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers start at 1, not 0.
Also, convert the reviews to integers and store the reviews in a new list called reviews_ints.
End of explanation
"""
# Convert labels to 1s and 0s for 'positive' and 'negative'
labels = labels.split('\n')
labels = np.array([1 if each == 'positive' else 0 for each in labels])
"""
Explanation: Encoding the labels
Our labels are "positive" or "negative". To use these labels in our network, we need to convert them to 0 and 1.
Exercise: Convert labels from positive and negative to 1 and 0, respectively.
End of explanation
"""
from collections import Counter
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
"""
Explanation: If you built labels correctly, you should see the next output.
End of explanation
"""
# Filter out that review with 0 length
print("before", reviews_ints[-2:])
print(len(reviews_ints))
print(len(labels))
for i, review_ints in enumerate(reviews_ints):
    if len(review_ints) <= 0:
        print("delete index : ", i)
        del reviews_ints[i]
        labels = np.delete(labels, i)  # np.delete returns a new array, so reassign it
        break  # there is exactly one zero-length review
print("after", reviews_ints[-2:])
print(len(reviews_ints))
print(len(labels))
#reviews_ints = [review_ints for review_ints in reviews_ints if len(review_ints) > 0 ]
"""
Explanation: Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 words.
Exercise: First, remove the review with zero length from the reviews_ints list.
End of explanation
"""
seq_len = 200
features = np.zeros((len(reviews_ints), seq_len), dtype=int)
for i, review_ints in enumerate(reviews_ints):
features[i, -len(review_ints):] = review_ints[:seq_len]
"""
Explanation: Exercise: Now, create an array features that contains the data we'll pass to the network. The data should come from review_ints, since we want to feed integers to the network. Each row should be 200 elements long. For reviews shorter than 200 words, left pad with 0s. That is, if the review is ['best', 'movie', 'ever'], [117, 18, 128] as integers, the row will look like [0, 0, 0, ..., 0, 117, 18, 128]. For reviews longer than 200, use only the first 200 words as the feature vector.
This isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data.
End of explanation
"""
features[:10,:100]
"""
Explanation: If you build features correctly, it should look like that cell output below.
End of explanation
"""
print(len(features))
print(len(labels))
split_frac = 0.8
split_index = int(len(features) * split_frac)
print(split_index)
test_split_frac = 0.9
test_split_index = int(len(features) * test_split_frac)
print(test_split_index)
train_x, train_y = features[:split_index], labels[:split_index]
val_x, val_y = features[split_index:test_split_index], labels[split_index:test_split_index]
test_x, test_y = features[test_split_index:], labels[test_split_index:]
print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
"\nValidation set: \t{}".format(val_x.shape),
"\nTest set: \t\t{}".format(test_x.shape))
"""
Explanation: Training, Validation, Test
With our data in nice shape, we'll split it into training, validation, and test sets.
Exercise: Create the training, validation, and test sets here. You'll need to create sets for the features and the labels, train_x and train_y for example. Define a split fraction, split_frac as the fraction of data to keep in the training set. Usually this is set to 0.8 or 0.9. The rest of the data will be split in half to create the validation and testing data.
End of explanation
"""
lstm_size = 256
lstm_layers = 1
batch_size = 500
learning_rate = 0.001
"""
Explanation: With train, validation, and test fractions of 0.8, 0.1, 0.1, the final shapes should look like:
Feature Shapes:
Train set: (20000, 200)
Validation set: (2500, 200)
Test set: (2500, 200)
Build the graph
Here, we'll build the graph. First up, defining the hyperparameters.
lstm_size: Number of units in the hidden layers in the LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc.
lstm_layers: Number of LSTM layers in the network. I'd start with 1, then add more if I'm underfitting.
batch_size: The number of reviews to feed the network in one training pass. Typically this should be set as high as you can go without running out of memory.
learning_rate: Learning rate
End of explanation
"""
n_words = len(vocab_to_int) + 1 # Adding 1 because we use 0's for padding, dictionary started at 1
# Create the graph object
graph = tf.Graph()
# Add nodes to the graph
with graph.as_default():
inputs_ = tf.placeholder(tf.int32, [None, None], name='inputs')
labels_ = tf.placeholder(tf.int32, [None, None], name='labels')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
"""
Explanation: For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability.
Exercise: Create the inputs_, labels_, and dropout keep_prob placeholders using tf.placeholder. labels_ needs to be two-dimensional to work with some functions later. Since keep_prob is a scalar (a 0-dimensional tensor), you shouldn't provide a size to tf.placeholder.
End of explanation
"""
# Size of the embedding vectors (number of units in the embedding layer)
embed_size = 300
with graph.as_default():
embedding = tf.Variable(tf.random_uniform((n_words, embed_size), -1, 1))
embed = tf.nn.embedding_lookup(embedding, inputs_)
"""
Explanation: Embedding
Now we'll add an embedding layer. We need to do this because there are 74000 words in our vocabulary. It is massively inefficient to one-hot encode our classes here. You should remember dealing with this problem from the word2vec lesson. Instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using word2vec, then load it here. But, it's fine to just make a new layer and let the network learn the weights.
Exercise: Create the embedding lookup matrix as a tf.Variable. Use that embedding matrix to get the embedded vectors to pass to the LSTM cell with tf.nn.embedding_lookup. This function takes the embedding matrix and an input tensor, such as the review vectors. Then, it'll return another tensor with the embedded vectors. So, if the embedding layer has 200 units, the function will return a tensor with size [batch_size, seq_len, 200], one 200-dimensional vector per word.
End of explanation
"""
with graph.as_default():
# Your basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# Add dropout to the cell
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)
# Getting an initial state of all zeros
initial_state = cell.zero_state(batch_size, tf.float32)
"""
Explanation: LSTM cell
<img src="assets/network_diagram.png" width=400px>
Next, we'll create our LSTM cells to use in the recurrent network (TensorFlow documentation). Here we are just defining what the cells look like. This isn't actually building the graph, just defining the type of cells we want in our graph.
To create a basic LSTM cell for the graph, you'll want to use tf.contrib.rnn.BasicLSTMCell. Looking at the function documentation:
tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0, input_size=None, state_is_tuple=True, activation=<function tanh at 0x109f1ef28>)
you can see it takes a parameter called num_units, the number of units in the cell, called lstm_size in this code. So then, you can write something like
lstm = tf.contrib.rnn.BasicLSTMCell(num_units)
to create an LSTM cell with num_units. Next, you can add dropout to the cell with tf.contrib.rnn.DropoutWrapper. This just wraps the cell in another cell, but with dropout added to the inputs and/or outputs. It's a really convenient way to make your network better with almost no effort! So you'd do something like
drop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
Most of the time, your network will have better performance with more layers. That's sort of the magic of deep learning, adding more layers allows the network to learn really complex relationships. Again, there is a simple way to create multiple layers of LSTM cells with tf.contrib.rnn.MultiRNNCell:
cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)
Here, [drop] * lstm_layers creates a list of cells (drop) that is lstm_layers long. The MultiRNNCell wrapper builds this into multiple layers of RNN cells, one for each cell in the list.
So the final cell you're using in the network is actually multiple (or just one) LSTM cells with dropout. But it all works the same from an architectural viewpoint, just a more complicated graph in the cell.
Exercise: Below, use tf.contrib.rnn.BasicLSTMCell to create an LSTM cell. Then, add drop out to it with tf.contrib.rnn.DropoutWrapper. Finally, create multiple LSTM layers with tf.contrib.rnn.MultiRNNCell.
Here is a tutorial on building RNNs that will help you out.
End of explanation
"""
with graph.as_default():
outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)
"""
Explanation: RNN forward pass
<img src="assets/network_diagram.png" width=400px>
Now we need to actually run the data through the RNN nodes. You can use tf.nn.dynamic_rnn to do this. You'd pass in the RNN cell you created (our multiple layered LSTM cell for instance), and the inputs to the network.
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state)
Above I created an initial state, initial_state, to pass to the RNN. This is the cell state that is passed between the hidden layers in successive time steps. tf.nn.dynamic_rnn takes care of most of the work for us. We pass in our cell and the input to the cell, then it does the unrolling and everything else for us. It returns outputs for each time step and the final_state of the hidden layer.
Exercise: Use tf.nn.dynamic_rnn to add the forward pass through the RNN. Remember that we're actually passing in vectors from the embedding layer, embed.
End of explanation
"""
with graph.as_default():
predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
cost = tf.losses.mean_squared_error(labels_, predictions)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
"""
Explanation: Output
We only care about the final output; we'll be using that as our sentiment prediction. So we need to grab the last output with outputs[:, -1], then calculate the cost from that and labels_.
End of explanation
"""
with graph.as_default():
correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
"""
Explanation: Validation accuracy
Here we can add a few nodes to calculate the accuracy which we'll use in the validation pass.
End of explanation
"""
def get_batches(x, y, batch_size=100):
n_batches = len(x)//batch_size
x, y = x[:n_batches*batch_size], y[:n_batches*batch_size]
for ii in range(0, len(x), batch_size):
yield x[ii:ii+batch_size], y[ii:ii+batch_size]
"""
Explanation: Batching
This is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the x and y arrays and returns slices out of those arrays with size [batch_size].
End of explanation
"""
epochs = 10
with graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
iteration = 1
for e in range(epochs):
state = sess.run(initial_state)
for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 0.5,
initial_state: state}
loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
if iteration%5==0:
print("Epoch: {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Train loss: {:.3f}".format(loss))
if iteration%25==0:
val_acc = []
val_state = sess.run(cell.zero_state(batch_size, tf.float32))
for x, y in get_batches(val_x, val_y, batch_size):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 1,
initial_state: val_state}
batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
val_acc.append(batch_acc)
print("Val acc: {:.3f}".format(np.mean(val_acc)))
iteration +=1
saver.save(sess, "checkpoints/sentiment.ckpt")
"""
Explanation: Training
Below is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. Before you run this, make sure the checkpoints directory exists.
End of explanation
"""
test_acc = []
with tf.Session(graph=graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
test_state = sess.run(cell.zero_state(batch_size, tf.float32))
for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 1,
initial_state: test_state}
batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
test_acc.append(batch_acc)
print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
"""
Explanation: Testing
End of explanation
"""
|
dvirsamuel/MachineLearningCourses | Visual Recognision - Stanford/assignment3/ImageGradients.ipynb | gpl-3.0 | # As usual, a bit of setup
import time, os, json
import numpy as np
import skimage.io
import matplotlib.pyplot as plt
from cs231n.classifiers.pretrained_cnn import PretrainedCNN
from cs231n.data_utils import load_tiny_imagenet
from cs231n.image_utils import blur_image, deprocess_image
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
"""
Explanation: Image Gradients
In this notebook we'll introduce the TinyImageNet dataset and a deep CNN that has been pretrained on this dataset. You will use this pretrained model to compute gradients with respect to images, and use these image gradients to produce class saliency maps and fooling images.
End of explanation
"""
data = load_tiny_imagenet('cs231n/datasets/tiny-imagenet-100-A/tiny-imagenet-100-A', subtract_mean=True)
"""
Explanation: Introducing TinyImageNet
The TinyImageNet dataset is a subset of the ILSVRC-2012 classification dataset. It consists of 200 object classes, and for each object class it provides 500 training images, 50 validation images, and 50 test images. All images have been downsampled to 64x64 pixels. We have provided the labels for all training and validation images, but have withheld the labels for the test images.
We have further split the full TinyImageNet dataset into two equal pieces, each with 100 object classes. We refer to these datasets as TinyImageNet-100-A and TinyImageNet-100-B; for this exercise you will work with TinyImageNet-100-A.
To download the data, go into the cs231n/datasets directory and run the script get_tiny_imagenet_a.sh. Then run the following code to load the TinyImageNet-100-A dataset into memory.
NOTE: The full TinyImageNet-100-A dataset will take up about 250MB of disk space, and loading the full TinyImageNet-100-A dataset into memory will use about 2.8GB of memory.
End of explanation
"""
for i, names in enumerate(data['class_names']):
print i, ' '.join('"%s"' % name for name in names)
"""
Explanation: TinyImageNet-100-A classes
Since ImageNet is based on the WordNet ontology, each class in ImageNet (and TinyImageNet) actually has several different names. For example "pop bottle" and "soda bottle" are both valid names for the same class. Run the following to see a list of all classes in TinyImageNet-100-A:
End of explanation
"""
# Visualize some examples of the training data
classes_to_show = 7
examples_per_class = 5
class_idxs = np.random.choice(len(data['class_names']), size=classes_to_show, replace=False)
for i, class_idx in enumerate(class_idxs):
train_idxs, = np.nonzero(data['y_train'] == class_idx)
train_idxs = np.random.choice(train_idxs, size=examples_per_class, replace=False)
for j, train_idx in enumerate(train_idxs):
img = deprocess_image(data['X_train'][train_idx], data['mean_image'])
plt.subplot(examples_per_class, classes_to_show, 1 + i + classes_to_show * j)
if j == 0:
plt.title(data['class_names'][class_idx][0])
plt.imshow(img)
plt.gca().axis('off')
plt.show()
"""
Explanation: Visualize Examples
Run the following to visualize some example images from random classes in TinyImageNet-100-A. It selects classes and images randomly, so you can run it several times to see different images.
End of explanation
"""
model = PretrainedCNN(h5_file='cs231n/datasets/pretrained_model.h5')
"""
Explanation: Pretrained model
We have trained a deep CNN for you on the TinyImageNet-100-A dataset that we will use for image visualization. The model has 9 convolutional layers (with spatial batch normalization) and 1 fully-connected hidden layer (with batch normalization).
To get the model, run the script get_pretrained_model.sh from the cs231n/datasets directory. After doing so, run the following to load the model from disk.
End of explanation
"""
batch_size = 100
# Test the model on training data
mask = np.random.randint(data['X_train'].shape[0], size=batch_size)
X, y = data['X_train'][mask], data['y_train'][mask]
y_pred = model.loss(X).argmax(axis=1)
print 'Training accuracy: ', (y_pred == y).mean()
# Test the model on validation data
mask = np.random.randint(data['X_val'].shape[0], size=batch_size)
X, y = data['X_val'][mask], data['y_val'][mask]
y_pred = model.loss(X).argmax(axis=1)
print 'Validation accuracy: ', (y_pred == y).mean()
"""
Explanation: Pretrained model performance
Run the following to test the performance of the pretrained model on some random training and validation set images. You should see training accuracy around 90% and validation accuracy around 60%; this indicates a bit of overfitting, but it should work for our visualization experiments.
End of explanation
"""
def indices_to_one_hot(data, nb_classes):
"""Convert an iterable of indices to one-hot encoded labels."""
targets = np.array(data).reshape(-1)
return np.eye(nb_classes)[targets]
def compute_saliency_maps(X, y, model):
"""
Compute a class saliency map using the model for images X and labels y.
Input:
- X: Input images, of shape (N, 3, H, W)
- y: Labels for X, of shape (N,)
- model: A PretrainedCNN that will be used to compute the saliency map.
Returns:
- saliency: An array of shape (N, H, W) giving the saliency maps for the input
images.
"""
saliency = None
##############################################################################
# TODO: Implement this function. You should use the forward and backward #
# methods of the PretrainedCNN class, and compute gradients with respect to #
# the unnormalized class score of the ground-truth classes in y. #
##############################################################################
out, cache = model.forward(X)
dout = indices_to_one_hot(y,100)
dX, grads = model.backward(dout, cache)
saliency = np.max(np.abs(dX),axis=1)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return saliency
"""
Explanation: Saliency Maps
Using this pretrained model, we will compute class saliency maps as described in Section 3.1 of [1].
As mentioned in Section 2 of the paper, you should compute the gradient of the image with respect to the unnormalized class score, not with respect to the normalized class probability.
You will need to use the forward and backward methods of the PretrainedCNN class to compute gradients with respect to the image. Open the file cs231n/classifiers/pretrained_cnn.py and read the documentation for these methods to make sure you know how they work. For example usage, you can see the loss method. Make sure to run the model in test mode when computing saliency maps.
[1] Karen Simonyan, Andrea Vedaldi, and Andrew Zisserman. "Deep Inside Convolutional Networks: Visualising
Image Classification Models and Saliency Maps", ICLR Workshop 2014.
End of explanation
"""
def show_saliency_maps(mask):
mask = np.asarray(mask)
X = data['X_val'][mask]
y = data['y_val'][mask]
saliency = compute_saliency_maps(X, y, model)
for i in xrange(mask.size):
plt.subplot(2, mask.size, i + 1)
plt.imshow(deprocess_image(X[i], data['mean_image']))
plt.axis('off')
plt.title(data['class_names'][y[i]][0])
plt.subplot(2, mask.size, mask.size + i + 1)
plt.title(mask[i])
plt.imshow(saliency[i])
plt.axis('off')
plt.gcf().set_size_inches(10, 4)
plt.show()
# Show some random images
mask = np.random.randint(data['X_val'].shape[0], size=5)
show_saliency_maps(mask)
# These are some cherry-picked images that should give good results
show_saliency_maps([128, 3225, 2417, 1640, 4619])
"""
Explanation: Once you have completed the implementation in the cell above, run the following to visualize some class saliency maps on the validation set of TinyImageNet-100-A.
End of explanation
"""
def make_fooling_image(X, target_y, model):
"""
Generate a fooling image that is close to X, but that the model classifies
as target_y.
Inputs:
- X: Input image, of shape (1, 3, 64, 64)
- target_y: An integer in the range [0, 100)
- model: A PretrainedCNN
Returns:
    - X_fooling: An image that is close to X, but that is classified as target_y
by the model.
"""
X_fooling = X.copy()
##############################################################################
# TODO: Generate a fooling image X_fooling that the model will classify as #
# the class target_y. Use gradient ascent on the target class score, using #
# the model.forward method to compute scores and the model.backward method #
# to compute image gradients. #
# #
# HINT: For most examples, you should be able to generate a fooling image #
# in fewer than 100 iterations of gradient ascent. #
##############################################################################
#current_loss, grads = model.loss(X_fooling,target_y)
scores, cache = model.forward(X_fooling)
i = 0
while scores.argmax() != target_y:
print(i,scores.argmax(),target_y)
dout = indices_to_one_hot(target_y,100)
dX, grads = model.backward(dout, cache)
X_fooling += 200 * dX
scores, cache = model.forward(X_fooling)
i += 1
##############################################################################
# END OF YOUR CODE #
##############################################################################
return X_fooling
"""
Explanation: Fooling Images
We can also use image gradients to generate "fooling images" as discussed in [2]. Given an image and a target class, we can perform gradient ascent over the image to maximize the target class, stopping when the network classifies the image as the target class. Implement the following function to generate fooling images.
[2] Szegedy et al, "Intriguing properties of neural networks", ICLR 2014
End of explanation
"""
# Find a correctly classified validation image
while True:
i = np.random.randint(data['X_val'].shape[0])
X = data['X_val'][i:i+1]
y = data['y_val'][i:i+1]
y_pred = model.loss(X)[0].argmax()
if y_pred == y: break
target_y = 67
X_fooling = make_fooling_image(X, target_y, model)
# Make sure that X_fooling is classified as y_target
scores = model.loss(X_fooling)
assert scores[0].argmax() == target_y, 'The network is not fooled!'
# Show original image, fooling image, and difference
plt.subplot(1, 3, 1)
plt.imshow(deprocess_image(X, data['mean_image']))
plt.axis('off')
plt.title(data['class_names'][y][0])
plt.subplot(1, 3, 2)
plt.imshow(deprocess_image(X_fooling, data['mean_image'], renorm=True))
plt.title(data['class_names'][target_y][0])
plt.axis('off')
plt.subplot(1, 3, 3)
plt.title('Difference')
plt.imshow(deprocess_image(X - X_fooling, data['mean_image']))
plt.axis('off')
plt.show()
"""
Explanation: Run the following to choose a random validation set image that is correctly classified by the network, and then make a fooling image.
End of explanation
"""
|
MatthewDaws/OSMDigest | notebooks/Pythonify.ipynb | mit | import osmdigest.pythonify as pythonify
import os
basedir = os.path.join("/media/disk", "OSM_Data")
filename = "illinois-latest.osm.xz"
"""
Explanation: Pythonify
If you have a reasonable amount of ram, then it's possible to load quite big XML files fully into memory and to general python dictionaries from them. These can then be saved out in compressed pickled format.
This is a low-tech, non-portable solution for subsequently quickly extracting data, but it's also quick and easy.
End of explanation
"""
tags = pythonify.Tags(os.path.join(basedir, filename))
pythonify.pickle(tags, "illinois_tags.pic.xz")
os.stat("illinois_tags.pic.xz").st_size / 1024**2
tags.nodes_from_key("name")[:5]
tags_by_id = pythonify.TagsById(tags)
tags_by_id.node(701092)
"""
Explanation: Extract tags
End of explanation
"""
tags = pythonify.unpickle("illinois_tags.pic.xz")
list(tags.all_relation_tag_keys)[:5]
tags.relations_from_key("tower:type")
pythonify.TagsById(tags).relation(5813084)
"""
Explanation: Load tags back in
End of explanation
"""
nodes = pythonify.Nodes(os.path.join(basedir, filename))
pythonify.pickle(nodes, "illinois_nodes.pic.xz")
os.stat("illinois_nodes.pic.xz").st_size / 1024**2
"""
Explanation: Extract nodes
This is typically the most memory intensive operation.
End of explanation
"""
ways = pythonify.Ways(os.path.join(basedir, filename))
pythonify.pickle(ways, "illinois_ways.pic.xz")
os.stat("illinois_ways.pic.xz").st_size / 1024**2
relations = pythonify.Relations(os.path.join(basedir, filename))
pythonify.pickle(relations, "illinois_relations.pic.xz")
os.stat("illinois_relations.pic.xz").st_size / 1024**2
"""
Explanation: Extract ways and relations
End of explanation
"""
nodes = pythonify.unpickle("illinois_nodes.pic.xz")
nodes = pythonify.NodesPacked.from_Nodes(nodes)
i = iter(nodes)
for j in range(10):
print(next(i))
nodes[700732]
pythonify.pickle(nodes, "illinois_nodes_packed.pic.xz")
os.stat("illinois_nodes_packed.pic.xz").st_size / 1024**2
"""
Explanation: Load back node data and recompress
End of explanation
"""
ways = pythonify.unpickle("illinois_ways.pic.xz")
print(next(iter(ways)))
nodes[20326165], nodes[33235915]
"""
Explanation: Load back way data
End of explanation
"""
import osmdigest.pythonify as pythonify
import os
basedir = os.path.join("/media/disk", "OSM_Data")
filename = "california-latest.osm.xz"
pythonify.pythonify_and_pickle(os.path.join(basedir, filename), os.path.join(basedir, "california"))
"""
Explanation: Process California data in one go
End of explanation
"""
|