repo_name (string, 6–77 chars) | path (string, 8–215 chars) | license (15 classes) | content (string, 335–154k chars)
---|---|---|---|
hankcs/HanLP | plugins/hanlp_demo/hanlp_demo/zh/tst_restful.ipynb | apache-2.0 | !pip install hanlp_restful -U
"""
Explanation: <h2 align="center">Click one of the icons below to run HanLP online</h2>
<div align="center">
<a href="https://colab.research.google.com/github/hankcs/HanLP/blob/doc-zh/plugins/hanlp_demo/hanlp_demo/zh/tst_restful.ipynb" target="_blank"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
<a href="https://mybinder.org/v2/gh/hankcs/HanLP/doc-zh?filepath=plugins%2Fhanlp_demo%2Fhanlp_demo%2Fzh%2Ftst_restful.ipynb" target="_blank"><img src="https://mybinder.org/badge_logo.svg" alt="Open In Binder"/></a>
</div>
Installation
Whether you are on Windows, Linux, or macOS, installing HanLP takes just one line:
End of explanation
"""
from hanlp_restful import HanLPClient
HanLP = HanLPClient('https://www.hanlp.com/api', auth=None, language='zh') # leave auth=None for anonymous access; language='zh' for Chinese, 'mul' for multilingual
"""
Explanation: Create a client
End of explanation
"""
HanLP.text_style_transfer(['国家对中石油抱有很大的期望.', '要用创新去推动高质量的发展。'],
target_style='gov_doc')
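# One more illustrative call (a sketch using the same API as above; the input sentence
# below is made up for demonstration):
HanLP.text_style_transfer(['请大家尽快把文件发给我。'], target_style='gov_doc')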
"""
Explanation: Apply for an API key
Since server compute is limited, anonymous users are limited to 2 calls per minute. If you need more calls, apply for a free public-service API key (auth).
Text style transfer
Given short input texts and a target style, perform text style transfer:
End of explanation
"""
|
felipessalvatore/CNNexample | src/tutorials/notMNIST.ipynb | mit | import os
import sys
import tensorflow as tf
import inspect
import matplotlib.pyplot as plt
import numpy as np
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from util import get_data_4d, plot9images,randomize_in_place
from CNN import CNNModel, train_model,check_test,one_prediction
from DataHolder import DataHolder
from Config import Config
"""
Explanation: CNN Example: notMNIST dataset
The notMNIST dataset is an example similar to MNIST. The data consists of characters rendered in a variety of fonts as 28x28 images. The labels are limited to 'A' through 'J' (10 classes).
End of explanation
"""
train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = get_data_4d()
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
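# A quick sanity check (a sketch, not part of the original walk-through): decode the
# one-hot training labels and count them -- the ten letters A-J should appear in
# roughly equal numbers.
import collections
label_counts = collections.Counter(chr(c + ord('A')) for c in np.argmax(train_labels, axis=1))
print(sorted(label_counts.items()))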
"""
Explanation: Importing all the data
End of explanation
"""
train_classes = np.argmax(train_labels, axis=1)
train_classes = [chr(i + ord('A')) for i in train_classes]
img_size = 28
img_shape = (img_size, img_size)
images = train_dataset[0:9]
cls_true = train_classes[0:9]
plot9images(images, cls_true, img_shape)
"""
Explanation: Visualizing some examples
End of explanation
"""
my_config = Config()
print("batch_size = {}".format(my_config.batch_size))
print("patch_size = {}".format(my_config.patch_size))
print("image_size = {}".format(my_config.image_size))
print("num_labels = {}".format(my_config.num_labels))
print("num_channels = {}".format(my_config.num_channels))
print("num_filters_1 = {}".format(my_config.num_filters_1))
print("num_filters_2 = {}".format(my_config.num_filters_2))
print("hidden_nodes_1 = {}".format(my_config.hidden_nodes_1))
print("hidden_nodes_2 = {}".format(my_config.hidden_nodes_2))
print("hidden_nodes_3 = {}".format(my_config.hidden_nodes_3))
print("learning_rate = {}".format(my_config.learning_rate))
print("steps_for_decay = {}".format(my_config.steps_for_decay))
print("decay_rate = {}".format(my_config.decay_rate))
"""
Explanation: The hyperparameters of the model are
End of explanation
"""
my_dataholder = DataHolder(train_dataset,
train_labels,
valid_dataset,
valid_labels,
test_dataset,
test_labels)
my_model = CNNModel(my_config, my_dataholder)
train_model(my_model, my_dataholder, num_steps=10001, show_step=1000)
"""
Explanation: Now, training the model using 10001 steps
End of explanation
"""
print("Test accuracy: %.2f%%" % (check_test(my_model) * 100))
"""
Explanation: Checking the trained model with the test dataset
End of explanation
"""
randomize_in_place(valid_dataset, valid_labels, 0)
valid_classes = np.argmax(valid_labels, axis=1)
valid_classes = [chr(i + ord('A')) for i in valid_classes]
cls_true = valid_classes[0:9]
images = valid_dataset[0:9]
images = [image.reshape(1,
image.shape[0],
image.shape[1],
image.shape[2]) for image in images]
predictions = [chr(one_prediction(my_model, image) + ord('A')) for image in images]
plot9images(images, cls_true, img_shape)
"""
Explanation: Seeing how the model performs on 9 images from the validation dataset
End of explanation
"""
|
JoseGuzman/myIPythonNotebooks | MachineLearning/K-means_clustering.ipynb | gpl-2.0 | %pylab inline
# generate some data
def create_cluster(npoints, n_clusters):
"""
create clustered data
Arguments:
ncluster -- (int) number of clusters
npoints -- (int) number of data points in every cluster
Returns a 2D NumPy array of shape npoints, 2
"""
np.random.seed(10)
datax, datay = list(), list()
for _ in range(n_clusters):
dY = np.random.uniform(low = 20e3, high = 200e3) # dispersion in X
dX = np.random.uniform(low = 20.0, high = 70.0) # dispersion in Y
datax.append( np.random.normal(loc = dX, scale = 2.0 , size = npoints) )
datay.append( np.random.normal(loc = dY, scale = 10e3, size = npoints) )
X, Y = np.concatenate(datax), np.concatenate(datay)
return np.array([X,Y]).T
data = create_cluster(npoints = 20, n_clusters = 5)
data.shape
plt.plot(data[:,0], data[:,1], 'ko', markersize=5);
plt.xlabel('X'), plt.ylabel('Y');
from sklearn.cluster import KMeans
mymodel = KMeans(n_clusters=5)
# Note: the data is fit here without scaling; normalizing it first (see the next section) is important for good results.
mymodel.fit(data)
# We can look at the clusters each data point was assigned to
print(mymodel.labels_)
# And we'll visualize it:
plt.scatter(data[:,0], data[:,1], c=mymodel.labels_.astype(float));
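# A small follow-up sketch (not in the original notebook): after fitting, KMeans exposes
# the centroids in `cluster_centers_`, and overlaying them on the data shows whether the
# five centres landed where we expect.
centers = mymodel.cluster_centers_
plt.scatter(data[:, 0], data[:, 1], c=mymodel.labels_.astype(float), s=20)
plt.scatter(centers[:, 0], centers[:, 1], c='red', marker='x', s=100);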
"""
Explanation: <H1>K-means clustering</H1>
K-means clustering is a form of unsupervised learning. It divides n observations into k clusters in which each observation belongs to the cluster with the closest mean (centroid).
End of explanation
"""
from sklearn.preprocessing import scale
x = np.random.normal(loc=10, scale=2, size=10)
print('[Data]-> Mean = %2.4f, StDev = %2.4f'% (x.mean(), x.std()))
y = scale(x)
print('[Norm]-> Mean = %2.4f, StDev = %2.4f'% (y.mean(), y.std()) )
mymodel.fit(scale(data))
plt.scatter(data[:,0], data[:,1], c=mymodel.labels_.astype(float));
mymodel.score(data) # score() returns the negative of the inertia for this data, so values closer to zero are better
"""
Explanation: <H2> Normalizing the data</H2>
To account for the different scales and mean values of the data, we normalize each feature vector
End of explanation
"""
# KMeans.fit() leaves an "inertia_" attribute on the fitted model
inertia = list()
k_values = range(1,10)
for k in k_values:
model = KMeans( n_clusters = k )
model.fit( scale(data) )
inertia.append( model.inertia_ )
    print('k = %d, inertia = %2.4f'%(k, model.inertia_ ))
# calculate the drop in inertia relative to the first value (no clustering, k=1)
inertia = inertia[0] - np.array(inertia)
# Look for bending points
plt.plot(k_values, inertia, lw=4)
plt.xlabel('K-value'), plt.ylabel('Inertia');
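# A hedged alternative to eyeballing the inertia curve (not in the original notebook):
# scikit-learn's silhouette score rates cluster separation between -1 and 1, so it can
# be compared directly across candidate values of k.
from sklearn.metrics import silhouette_score
for k in range(2, 10):
    model = KMeans(n_clusters=k).fit(scale(data))
    print('k = %d, silhouette = %2.4f' % (k, silhouette_score(scale(data), model.labels_)))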
"""
Explanation: <H2>Inertia</H2>
Inertia is the sum of squared distances of samples to their closest cluster center.
End of explanation
"""
|
KIPAC/StatisticalMethods | tutorials/gaussians.ipynb | gpl-2.0 | exec(open('tbc.py').read()) # define TBC and TBC_above
import numpy as np
import scipy.stats as st
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: Tutorial: Gaussians and Least Squares
So far in the notes and problems, we've mostly avoided one of the most commonly used probability distributions, the Gaussian or normal distribution:
$\mathrm{Normal}(x|\mu,\sigma) \equiv p_\mathrm{Normal}(x|\mu,\sigma) = \frac{1}{\sqrt{2\pi\sigma^2}}\exp \left[-\frac{(x-\mu)^2}{2\sigma^2}\right]$. [Endnote 1]
There are two reasons for this:
1. The symmetry between $x$ and $\mu$ makes it easy to miss the distinction between the sampling distribution and the likelihood function, and to conflate the model parameter $\sigma$ with an "error bar" associated strictly with the data (which it may or may not be).
2. The assumption of Gaussian PDFs is baked into various classical statistics methods to the extent that it isn't always obvious to the user. As always, it's important to think about whether an assumption or approximation is justified, and thus to see examples of when it is not.
That said, it is certainly common to use Gaussian distributions in practice, particularly in cases where
1. the approximation is well justified, as in the large-count limit of the Poisson distribution (typical of optical astronomy and longer wavelengths); or
2. we are effectively handed a table of data with "error bars" and have no better alternative than to assume a Gaussian sampling distribution.
Gaussians have lots of nice mathematical features that make them convenient to work with when we can. For example, see a list of identities for the multivariate Gaussian here or here.
There are a couple of cases that it's useful to work through if you haven't before, to build intuition. We'll do that here, with:
the product of two Gaussians
showing conjugacy
linear transformations
extending classical weighted least squares
End of explanation
"""
TBC()
# pick some values (where m is mu, s sigma)
# x =
# m1 =
# s1 =
# m2 =
# s2 =
# compute things
# sa =
# ma =
# mb =
# sb =
def exp_part(y, m, s):
return ((y - m) / s)**2
print('This should be a pretty small number:',
exp_part(x,m1,s1) + exp_part(x,m2,s2) - ( exp_part(x,ma,sa) + exp_part(0,mb,sb) ) )
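# A hedged numerical check, assuming the standard precision-weighted result for the
# product of two Gaussians in x:
#   sigma_a^2 = (sigma_1^-2 + sigma_2^-2)^-1,  mu_a = sigma_a^2 (mu_1/sigma_1^2 + mu_2/sigma_2^2),
# with the x-independent factor a Gaussian in (mu_1 - mu_2) of width sqrt(sigma_1^2 + sigma_2^2).
# The numbers below are arbitrary illustrations, not the exercise cells themselves.
m1_, s1_, m2_, s2_, x_ = 1.0, 0.5, 2.0, 1.5, 0.7
sa_ = np.sqrt(1.0 / (s1_**-2 + s2_**-2))
ma_ = sa_**2 * (m1_ / s1_**2 + m2_ / s2_**2)
lhs = st.norm.pdf(x_, m1_, s1_) * st.norm.pdf(x_, m2_, s2_)
rhs = st.norm.pdf(x_, ma_, sa_) * st.norm.pdf(m1_ - m2_, 0.0, np.sqrt(s1_**2 + s2_**2))
print('ratio (should be 1 up to round-off):', lhs / rhs)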
"""
Explanation: 1. Multiplication
The product of Gaussians comes up, for example, when the sampling distributions for different data points are independent Gaussians, or when the sampling distribution and prior are both Gaussian (this is a conjugate pair).
So, consider
$\mathrm{Normal}(x|\mu_1,\sigma_1) \, \mathrm{Normal}(x|\mu_2,\sigma_2)$.
This can be manipulated into a different product of two Gaussians, with $x$ appearing in only one of them. Do so. (Note that this is a proportionality, not an equality - the coefficient in front will not perfectly normalize things when you're done.)
$\mathrm{Normal}(x|\mu_1,\sigma_1) \, \mathrm{Normal}(x|\mu_2,\sigma_2) \propto \mathrm{Normal}(x|\mu_a,\sigma_a) \, \mathrm{Normal}(0|\mu_b,\sigma_b)$.
If $x$ were a model parameter, and $\mu_i$ and $\sigma_i$ were independent measurements of $x$ with error bars, how do you interpret each term of this factorization?
math, math, math, math,
Check your solution by plugging in some values for $x$, $\mu_i$ and $\sigma_i$. The function below returns the $\frac{(x-\mu)^2}{\sigma^2}$ part of the PDF, which is what we care about here (since it's where $x$ appears).
End of explanation
"""
TBC()
# pick some values
# y =
# sigma =
# m0 =
# s0 =
# compute things
# s1 =
# m1 =
# plot
mugrid = np.arange(-1.0, 2.0, 0.01)
# we'll compare the log-probabilities, since that's a good habit to be in
diff = st.norm.logpdf(y, loc=mugrid, scale=sigma)+st.norm.logpdf(mugrid, loc=m0, scale=s0) - st.norm.logpdf(mugrid, loc=m1, scale=s1)
print('This should be a pretty small number, and constant:')
plt.rcParams['figure.figsize'] = (7.0, 5.0)
plt.plot(mugrid, diff, 'b-');
plt.xlabel(r'$\mu$');
plt.ylabel('log-posterior difference');
"""
Explanation: 2. Conjugacy
When the sampling distribution is normal with a fixed variance, the conjugate prior for the mean is also normal. Show this for the case of a single data point, $y$; that is,
$p(\mu|y,\sigma) \propto \mathrm{Normal}(y|\mu,\sigma)\,\mathrm{Normal}(\mu|m_0,s_0) \propto \mathrm{Normal}(\mu|m_1,s_1)$
and find $m_1$ and $s_1$ in terms of $y$, $\sigma$, $m_0$ and $s_0$.
math, math, math, math
Again, check your work by choosing some fiducial values and
looking at the ratio $\mathrm{Normal}(y|\mu,\sigma)\,\mathrm{Normal}(\mu|m_0,s_0) / \mathrm{Normal}(\mu|m_1,s_1)$ over a range of $\mu$. It should be constant.
End of explanation
"""
# generate some fake data
a = 0.0
b = 1.0
n = 10
x = st.norm.rvs(size=n)
sigma = st.uniform.rvs(1.0, 2.0, size=n)
y = st.norm.rvs(loc=a+b*x, scale=sigma, size=n)
plt.rcParams['figure.figsize'] = (7.0, 5.0)
plt.errorbar(x, y, yerr=sigma, fmt='bo');
plt.xlabel('x');
plt.ylabel('y');
"""
Explanation: 3. Linear transformation
Consider the distribution
$\mathrm{Normal}\left[y\,\big|\,\mu_y(x;a,b),\sigma_y\right]$,
where $\mu_y(x;a,b)=a+bx$. Re-express this in terms of a distribution over $x$, i.e.
$\mathrm{Normal}\left[x|\mu_x(y;a,b),\sigma_x(y;a,b)\right]$.
math, math, math, math
4. Classical weighted least squares
Classical WLS is a simple method for fitting a line to data that you've almost certainly seen before. Consider data consisting of $n$ triplets $(x_i,y_i,\sigma_i)$, where $x_i$ are assumed to be known perfectly and $\sigma_i$ is interpreted as a "measurement error" for $y_i$. WLS maximizes the likelihood function
$\mathcal{L}(a,b;x,y,\sigma) = \prod_{i=1}^n \mathrm{Normal}(y_i|a+bx_i,\sigma_i)$.
In fact, we can get away with being more general and allowing for the possibility that the different measurements are not independent, with their measurement errors jointly characterized by a known covariance matrix, $\Sigma$, rather than the individual $\sigma_i$:
$\mathcal{L}(a,b;x,y,\Sigma) = \mathrm{Normal}(y|X\beta,\Sigma) = \frac{1}{\sqrt{(2\pi)^n|\Sigma|}}\exp \left[-\frac{1}{2}(y-X\beta)^\mathrm{T}\Sigma^{-1}(y-X\beta)\right]$,
where $X$ is called the design matrix, with each row equal to $(1, x_i)$, and $\beta = \left(\begin{array}{c}a\\b\end{array}\right)$.
With a certain amount of algebra, it can be shown that $\mathcal{L}$ is proportional to a bivariate Gaussian over $\beta$,
$\mathcal{L} \propto \mathrm{Normal}(\beta | \mu_\beta, \Sigma_\beta)$,
with
$\Sigma_\beta = (X^\mathrm{T}\Sigma^{-1}X)^{-1}$;
$\mu_\beta = \Sigma_\beta X^\mathrm{T}\Sigma^{-1} y$.
In classical WLS, $\mu_\beta$ is the "best fit" estimate of $a$ and $b$, and $\Sigma_\beta$ is the covariance of the standard errors on those parameters.
The relative simplicity of the computations above, not to mention the fact that they are efficiently implemented in numerous packages, can be useful even in situations beyond the assumption-heavy scenario where WLS is derived. As a simple example, consider a case where the sampling distribution corresponds to the likelihood function above, but we wish to use an informative prior on $a$ and $b$.
Taking advantage of the results you derived above (all of which have straightforward multivariate analogs),
1. What is the form of prior, $p(a,b|\alpha)$, that makes this problem conjugate? (Here $\alpha$ is a stand-in for whatever parameters determine the prior.)
2. What are the form and parameters of the posterior, $p(a,b|x,y,\Sigma,\alpha)$?
3. Verify that you recover the WLS solution in the limit of the prior being uniform over the $(a,b)$ plane.
1.
Below, we will explicitly show the correspondence in (3) for a WLS fit of some mock data.
End of explanation
"""
import statsmodels.api as sm
model = sm.WLS(y, sm.add_constant(x), weights=sigma**-2)
wls = model.fit()
mu_beta = np.matrix(wls.params).T # cast as a column vector
Sigma_beta = np.asmatrix(wls.normalized_cov_params)
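# A quick by-hand check of the WLS formulas quoted above (a sketch; the tutorial only
# suggests this as an optional verification): X has rows (1, x_i), Sigma is taken to be
# diag(sigma_i^2), and then Sigma_beta = (X^T Sigma^-1 X)^-1, mu_beta = Sigma_beta X^T Sigma^-1 y.
X_design = np.stack([np.ones_like(x), x], axis=1)
Sigma_inv = np.diag(1.0 / sigma**2)
Sigma_beta_check = np.linalg.inv(X_design.T @ Sigma_inv @ X_design)
mu_beta_check = Sigma_beta_check @ X_design.T @ Sigma_inv @ y
print('mu_beta by hand:', mu_beta_check)
print('Sigma_beta by hand:\n', Sigma_beta_check)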
"""
Explanation: The next cell uses the statsmodels package to perform the WLS calculations. You are encouraged to implement the matrix algebra above to verify the results. What we get at the end are $\mu_\beta$ and $\Sigma_\beta$, as defined above.
End of explanation
"""
TBC()
# define prior parameters
# do some calculations, possibly
# parameters of the posterior:
# post_cov = ...
# post_mean = ...
"""
Explanation: Now, compute the parameters of the posterior for $\beta$ based on $\mu_\beta$ and $\Sigma_\beta$ (parameters that appear in the sampling distribution) and the parameters of the conjugate prior. Set the prior parameters to be equivalent to the uniform distribution for the check below (you can put in something different to see how it looks later).
Transform post_mean to a shape (2,) numpy array for convenience (as opposed to, say, a 2x1 matrix).
End of explanation
"""
print('WLS mean and covariance:')
print(mu_beta)
print(Sigma_beta)
print('Posterior mean and covariance:')
print(post_mean)
print(post_cov)
"""
Explanation: Compare the WLS and posterior parameters (they should be identical for a uniform prior):
End of explanation
"""
def log_post_brute(a, b):
like = np.sum( st.norm.logpdf(y, loc=a+b*x, scale=sigma) )
prior = st.multivariate_normal.logpdf([a,b], mean=np.asarray(prior_mean)[:,0], cov=prior_cov)
return prior + like
print('Difference between elegant and brute-force log posteriors for some random parameter values:')
print('(The third column should be basically constant, though non-zero.)\n')
for i in range(10):
a = np.random.rand() * 10.0 - 5.0
b = np.random.rand() * 10.0 - 5.0
diff = st.multivariate_normal.logpdf([a,b], mean=post_mean, cov=post_cov) - log_post_brute(a,b)
print([a, b, diff])
"""
Explanation: Below, we can compare your analytic solution to a brute-force calculation of the posterior:
End of explanation
"""
|
totalgood/twip | docs/notebooks/03 Data -- Getting Selective.ipynb | mit | GB = 8 * (100 * 1000 * len(tfdf)) / 1.e9
GB
tfdf
"""
Explanation: If you try to allocate a 16k word by 100k document DataFrame of 64-bit integers, you'll get a memory error on a 16 GB laptop.
Later we'll learn about "constant RAM" tools that can handle an unlimited stream of documents with a large (1M word) vocabulary. But first let's be frugal and see what we can do with robust, mature tools like Pandas.
Rather than cutting back on those 100k tweets, lets cut back on the words. What are all those 16k words and how often are they all used (maybe we can ignore infrequent words).
End of explanation
"""
tfdf = tfdf[tfdf.df > 9]
tfdf = tfdf[(tfdf.df > 9) & (((tfdf.df - tfdf.tf) / tfdf.tf) < 0.15)]
tfdf = tfdf[(tfdf.df > 20) & (((tfdf.df - tfdf.tf) / tfdf.tf) < 0.15)]
tfdf
# NumPy arrays (the guts of a Pandas DataFrame) need 8 bytes for each 64-bit (int64) value
GB = 8 * (100 * 1000 * len(tfdf)) / 1.e9
GB
"""
Explanation: Fortunately the odd words are at the top and bottom of an alphabetical index!
And it does look like the less useful tokens aren't used many times or in many documents.
What do you notice that might help distinguish "natural" words (zoom, zoos, zope, zynga) from URLs and machine-code (000, zzp, zsl107)?
End of explanation
"""
url_scheme_popular = r'(\b(' + '|'.join(uri_schemes_popular) + r')[:][/]{2})'
fqdn_popular = r'(\b[a-zA-Z0-9-.]+\b([.]' + r'|'.join(tld_popular) + r'\b)\b)'
url_path = r'(\b[\w/?=+#-_&%~\'"\\.,]*\b)'
pd.set_option('display.max_rows', 14)
pd.Series(uri_schemes_iana)
url_popular = r'(\b' + r'(http|https|svn|git|apt)[:]//' + fqdn_popular + url_path + r'\b)'
tweet = "Play the [postiive sum game](http://totalgood.com/a/b?c=42) of life instead of svn://us.gov."
import re
re.findall(url_popular, tweet)
# email = re.compile(r'^([\w-]+(?:\.[\w-]+)*)@((?:[\w-]+\.)*\w[\w-]{0,66})\.([a-z]{2,6}(?:\.[a-z]{2})?)')
fqdn = r'(\b[a-zA-Z0-9-.]+([.]' + r'|'.join(tld_iana) + r')\b)'
fqdn_popular = r'(\b[a-zA-Z0-9-.]+\b([.]' + r'|'.join(tld_popular) + r'\b)\b)'
username = r'(\b[a-zA-Z0-9-.!#$%&*+-/=?^_`{|}~]+\b)'
email = re.compile(r'(\b' + username + r'\b@\b' + fqdn + r'\b)')
email_popular = re.compile(r'(\b' + username + r'\b@\b' + fqdn_popular + r'\b)')
# TODO: unmatched surrounding symbols are accepted/consumed, likewise for multiple dots/ats
at = r'(([-@="_(\[{\|\s]+(at|At|AT)[-@="_)\]\}\|\s]+)|[@])'
dot = r'(([-.="_(\[{\|\s]+(dot|dt|Dot|DOT)[-.="_)\]\}\|\s]+)|[.])'
fqdn_obfuscated = r'(\b(([a-zA-Z0-9-]+' + dot + r'){1,7})(' + r'|'.join(tld_iana) + r')\b)'
fqdn_popular_obfuscated = r'(\b(([a-zA-Z0-9-]+' + dot + r'){1,7})(' + r'|'.join(tld_popular) + r')\b)'
username_obfuscated = r'(([a-zA-Z0-9!#$%&*+/?^`~]+' + dot + r'?){1,7})'
email_obfuscated = re.compile(r'(\b' + username_obfuscated + at + fqdn_obfuscated + r'\b)')
email_popular_obfuscated = re.compile(r'(\b' + username_obfuscated + at + fqdn_popular_obfuscated + r'\b)')
url_path = r'(\b[^\s]+)'
url_scheme = r'(\b(' + '|'.join(uri_schemes_iana) + r')[:][/]{2})'
url_scheme_popular = r'(\b(' + '|'.join(uri_schemes_popular) + r')[:][/]{2})'
url = r'(\b' + url_scheme + fqdn + url_path + r'?\b)'
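# A quick smoke test (an illustration; the address below is made up, and the patterns'
# exact behaviour depends on the tld lists defined earlier in the notebook):
sample = 'Write to jane.doe@example.com or to jane dot doe at example dot com.'
print(email.findall(sample))
print(email_obfuscated.findall(sample))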
"""
Explanation: Memory requirements (4 GB) are doable
But we've lost important words: "zoom"
And there's still a bit of garbage: "zh3gs0wbno"
These look like keys, slugs, hashes or URLs
Even though the tweets.json format includes a column for URLs
The URLs are left within the raw text as well
Let's use a formal but simple grammar engine:
Extended regular expressions
End of explanation
"""
|
Neurosim-lab/netpyne | netpyne/tutorials/voltage_movie_tut/voltage_movie_tut.ipynb | mit | import urllib.request
urllib.request.urlretrieve('https://raw.githubusercontent.com/Neurosim-lab/netpyne/development/doc/source/code/BS0284.swc', 'BS0284.swc')
"""
Explanation: Making a movie of voltage activity
We'll create a simple network made up of one imported morphology.
First we need to download the morphology.
End of explanation
"""
from netpyne import specs, sim
%matplotlib
netParams = specs.NetParams()
cellRule = netParams.importCellParams(
label='swc_cell',
fileName='BS0284.swc',
cellName='BS0284',
)
"""
Explanation: Then we need to import the morphology.
End of explanation
"""
netParams.renameCellParamsSec('swc_cell', 'soma_0', 'soma')
"""
Explanation: For convenience, we'll rename the first soma section in the morphology from soma_0 to soma.
End of explanation
"""
for secName in cellRule['secs']:
cellRule['secs'][secName]['geom']['cm'] = 1
if secName.startswith('soma'):
cellRule['secs'][secName]['mechs']['hh'] = {
'gnabar': 0.12,
'gkbar': 0.036,
'gl': 0.003,
'el': -70,
}
else:
cellRule['secs'][secName]['mechs']['pas'] = {
'g': 0.0000357,
'e': -70,
}
"""
Explanation: Next we'll add Hodgkin-Huxley mechanisms to the soma and a passive leak mechanism everywhere else.
End of explanation
"""
netParams.popParams['swc_pop'] = {'cellType': 'swc_cell', 'numCells': 1}
"""
Explanation: Now we'll make a population out of our imported cell.
End of explanation
"""
netParams.synMechParams['exc'] = {
'mod': 'Exp2Syn',
'tau1': 0.1,
'tau2': 5.0,
'e': 0,
}
netParams.stimSourceParams['bkg'] = {
'type': 'NetStim',
'rate': 10,
'noise': 0.0,
}
netParams.stimTargetParams['bkg->swc_cell'] = {
'source': 'bkg',
'conds': {'cellType': 'swc_cell'},
'weight': 0.1,
'delay': 10,
'synMech': 'exc',
}
"""
Explanation: Now we'll add a stimulation into the soma to cause an action potential.
End of explanation
"""
cfg = specs.SimConfig()
cfg.filename = 'plotshape'
cfg.duration = 30
cfg.recordTraces = {'V_soma': {'sec': 'soma', 'loc': 0.5, 'var': 'v'}}
cfg.recordStep = 0.5
cfg.analysis['plotTraces'] = {'include': ['all'], 'showFig': True}
"""
Explanation: Then we'll set up the simulation configuration.
End of explanation
"""
sim.initialize(simConfig=cfg, netParams=netParams)
sim.net.createPops()
sim.net.createCells()
sim.net.connectCells()
sim.net.addStims()
sim.setupRecording()
"""
Explanation: At this point, we could complete everything with sim.createSimulateAnalyze(netParams=netParams, simConfig=cfg), but we want to plot a movie frame at a certain interval, so we need to execute the simulation commands individually.
End of explanation
"""
plotArgs = {
'includePre' : [0],
'includePost': [0],
'cvar' : 'voltage',
'clim' : [-70, -20],
'saveFig' : 'movie',
'showFig' : False,
}
"""
Explanation: At this point, we could run the simulation with sim.runSim(), but we want to execute the following at intervals:
sim.analysis.plotShape(
includePre = [0],
includePost = [0],
cvar = 'voltage',
clim = [-70, -20],
saveFig = 'movie',
showFig = False,
)
First we have to make a dictionary of the arguments we want to feed into plotShape:
End of explanation
"""
sim.runSimWithIntervalFunc(1.0, sim.analysis.plotShape, timeRange=[10, 20], funcArgs=plotArgs)
"""
Explanation: Then we can replace sim.runSim() with:
End of explanation
"""
sim.gatherData()
sim.saveData()
sim.analysis.plotData()
"""
Explanation: This will execute sim.analysis.plotShape every 1.0 ms from 10 to 20 ms in the simulation and feed it the plotArgs dictionary we created above.
Once we're done simulating, we need to wrap up the final steps manually:
End of explanation
"""
!python3 -m pip install natsort imageio
"""
Explanation: Once everything is complete, we'll need to install a couple Python packages to make a movie from our frames.
End of explanation
"""
import os
import natsort
import imageio
images = []
filenames = natsort.natsorted([file for file in os.listdir() if 'movie' in file and file.endswith('.png')])
for filename in filenames:
images.append(imageio.imread(filename))
imageio.mimsave('shape_movie.gif', images)
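# An optional variation (a sketch): imageio's GIF writer also accepts a per-frame
# duration, which slows the movie down so the voltage wave is easier to follow.
imageio.mimsave('shape_movie_slow.gif', images, duration=0.5)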
"""
Explanation: Then the following will create an animated gif from the individual figures.
End of explanation
"""
|
undercertainty/ou_nlp | semeval_experiments/Building a dataframe from a core file.ipynb | apache-2.0 | filename='semeval2013-task7/semeval2013-Task7-5way/beetle/train/Core/FaultFinding-BULB_C_VOLTAGE_EXPLAIN_WHY1.xml'
import pandas as pd
from xml.etree import ElementTree as ET
tree=ET.parse(filename)
"""
Explanation: A simple (ie. no error checking or sensible engineering) notebook to extract the student answer data from an xml file.
I'm not 100% sure what we actually need for the moment, so I'm just going to extract the student answer data from a single file. That is, I'm not at first going to use the reference answer etc.
End of explanation
"""
r=tree.getroot()
r[2]
"""
Explanation: The student answers sit under the third child node of the tree:
End of explanation
"""
responses_ls=[{'accuracy':a.attrib['accuracy'], 'text':a.text, 'idx':i} for (i, a) in enumerate(r[2])]
responses_ls
"""
Explanation: Now iterate over the student answers to get the specific responses. For the moment, we'll just stick to the text and the accuracy. I'll also add an index term to make it a bit easier to convert to a dataframe.
End of explanation
"""
from string import punctuation
def to_tokens(textIn):
'''Convert the input textIn to a list of tokens'''
tokens_ls=[t.lower().strip(punctuation) for t in textIn.split()]
# remove any empty tokens
return [t for t in tokens_ls if t]
s = '"Help!" yelped the banana, who was obviously scared out of his skin.'
print(s)
print(to_tokens(s))
"""
Explanation: Next, we need to carry out whatever analysis we want on the answers. In this case, we'll split on whitespace, convert to lower case, and strip punctuation. Feel free to redefine the to_tokens function to do whatever analysis you prefer.
End of explanation
"""
for resp_dict in responses_ls:
resp_dict['tokens']=to_tokens(resp_dict['text'])
responses_ls
"""
Explanation: So now we can apply the to_tokens function to each of the student responses:
End of explanation
"""
vocab_set=set()
for resp_dict in responses_ls:
vocab_set=vocab_set.union(set(resp_dict['tokens']))
len(vocab_set)
"""
Explanation: OK, good. So now let's see how big the vocabulary is for the complete set:
End of explanation
"""
docFreq_dict={}
for t in vocab_set:
docFreq_dict[t]=len([resp_dict for resp_dict in responses_ls if t in resp_dict['tokens']])
docFreq_dict
"""
Explanation: Now we can set up a document frequency dict:
End of explanation
"""
for resp_dict in responses_ls:
resp_dict['tfidf']={t:resp_dict['tokens'].count(t)/docFreq_dict[t] for t in resp_dict['tokens']}
responses_ls[6]
"""
Explanation: Now add a tf.idf dict to each of the responses:
End of explanation
"""
out_df=pd.DataFrame(index=docFreq_dict.keys())
for resp_dict in responses_ls:
out_df[resp_dict['idx']]=pd.Series(resp_dict['tfidf'], index=out_df.index)
out_df=out_df.fillna(0).T
out_df.head()
accuracy_ss=pd.Series({r['idx']:r['accuracy'] for r in responses_ls})
accuracy_ss.head()
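# For comparison (a sketch, not part of the original walk-through): scikit-learn's
# TfidfVectorizer builds a term-document matrix with the conventional log-scaled tf-idf
# weighting directly from the raw response texts.
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(lowercase=True)
tfidf_matrix = vectorizer.fit_transform([r['text'] for r in responses_ls])
print(tfidf_matrix.shape)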
"""
Explanation: Finally, convert the response data into a dataframe:
End of explanation
"""
|
fastai/course-v3 | zh-nbs/Lesson3_imdb.ipynb | apache-2.0 | %reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai.text import *
"""
Explanation: Practical Deep Learning for Coders, v3
Lesson3_imdb
IMDB影评数据
End of explanation
"""
path = untar_data(URLs.IMDB_SAMPLE)
path.ls()
"""
Explanation: Preparing the data 准备数据
First let's download the dataset we are going to study. The dataset has been curated by Andrew Maas et al. and contains a total of 100,000 reviews on IMDB. 25,000 of them are labelled as positive and negative for training, another 25,000 are labelled for testing (in both cases they are highly polarized). The remaining 50,000 are additional unlabelled data (but we will find a use for them nonetheless).
首先,让我们先下载需要使用的数据。 IMDB 数据集由Andrew Maas等人收集,里面有10万条IMDB网站上的影评。其中2.5万条带有积极或消极的标签,用于训练;另外2.5万条带标签的影评用于测试(这些数据两极分化得很厉害);剩余的5万条是额外的未标记数据(以后我们会发现它们的用途)。
We'll begin with a sample we've prepared for you, so that things run quickly before going over the full dataset.
我们一起来看一下提前准备好的样本,这样会比跑遍整个数据集快一些。
End of explanation
"""
df = pd.read_csv(path/'texts.csv')
df.head()
df['text'][1]
"""
Explanation: It only contains one csv file, let's have a look at it.
例子里只包含了一个csv文档,我们一起来看一下里面的数据。
End of explanation
"""
data_lm = TextDataBunch.from_csv(path, 'texts.csv')
"""
Explanation: It contains one line per review, with the label ('negative' or 'positive'), the text and a flag to determine if it should be part of the validation set or the training set. If we ignore this flag, we can create a DataBunch containing this data in one line of code:
文档里的每一行都是一个影评,影评附有标签(“负面”或是“正面”)、评论文字以及一个标明是属于训练集还是验证集的标签,如果我们忽略这个(标明所属数据集的)标签,我们可以有下面这行代码来产生一个 DataBunch(数据堆):
End of explanation
"""
data_lm.save()
"""
Explanation: By executing this line a process was launched that took a bit of time. Let's dig a bit into it. Images could be fed (almost) directly into a model because they're just a big array of pixel values that are floats between 0 and 1. A text is composed of words, and we can't apply mathematical functions to them directly. We first have to convert them to numbers. This is done in two differents steps: tokenization and numericalization. A TextDataBunch does all of that behind the scenes for you.
运行这行代码会启动一个需要稍微花点时间的程序,让我们来更深入地了解一下。图像本质上是一个巨大的像素值数列,这个数列由0到1 之间的数字组成,因此图像数据基本上可以直接输入到模型中。但是,一段文字是由词组成的,而我们不能直接对词运用数学函数。那么我们首先需要将这些信息转化为数字。这一过程需要通过两部完成:分词和数值化。TextDataBunch在幕后为您完成所有这些工作。
Before we delve into the explanations, let's take the time to save the things that were calculated.
在我们开始讲解内容之前,让我们先花点时间将计算好的数据存档。
End of explanation
"""
data = load_data(path)
"""
Explanation: Next time we launch this notebook, we can skip the cell above that took a bit of time (and that will take a lot more when you get to the full dataset) and load those results like this:
下次我们启动这个notebook, 可以直接跳过之前稍费时间的单元格,直接用下面的代码载入之前保存的结果(如果你载入的是全部数据,之前这些步骤会花费更多时间):
End of explanation
"""
data = TextClasDataBunch.from_csv(path, 'texts.csv')
data.show_batch()
"""
Explanation: Tokenization 分词
The first step of processing we make the texts go through is to split the raw sentences into words, or more exactly tokens. The easiest way to do this would be to split the string on spaces, but we can be smarter:
处理数据的第一步是将文字分拆成单词, 或者更确切地说, 标准词(tokens)。最简单的方式是基于空格对句子进行分拆, 但我们能更智能地分词:
we need to take care of punctuation
<br>我们需要考虑标点
some words are contractions of two different words, like isn't or don't
<br>有些词是由两个不同的词缩写的,比如isn't或don't
we may need to clean some parts of our texts, if there's HTML code for instance
<br>我们可能需要清理文本的某些部分,比如文字中可能会有HTML代码
To see what the tokenizer had done behind the scenes, let's have a look at a few texts in a batch.<br>
为了明白分词器幕后是如何工作的,让我们来看一下数据堆中的一些文本。
End of explanation
"""
data.vocab.itos[:10]
"""
Explanation: The texts are truncated at 100 tokens for more readability. We can see that it did more than just split on space and punctuation symbols:
为了更简洁易读,我们将所有评论删节到100个词。我们可以看到文字标记化算法不仅仅是基于空格和标点进行了分词:
the "'s" are grouped together in one token
<br>所有“'s”都被合并为一个标准词
the contractions are separated like this: "did", "n't"
<br>词语的缩写被分开,比如“did” 和 “n't”
content has been cleaned for any HTML symbol and lower cased
<br>所有包含HTML连接的内容被清理,并且所有文字都采用小写
there are several special tokens (all those that begin by xx), to replace unknown tokens (see below) or to introduce different text fields (here we only have one).
<br>为了代替未知的标准词(如下)或者引入不同的文本字段(这里我们只有一个),(在结果中可以看到)有一些特殊的标准词(它们都以xx开头)
Numericalization 数值化
Once we have extracted tokens from our texts, we convert to integers by creating a list of all the words used. We only keep the ones that appear at least twice with a maximum vocabulary size of 60,000 (by default) and replace the ones that don't make the cut by the unknown token UNK.
一旦我们从文本中完成了标准词提取,就会生成一个包含所有词汇的列表,将标准词转化成整数。这里我们只保留至少出现两次的标准词,并设置词库上限为60,000(默认设置), 同时将所有不能分进行分词的词标记为“未知标准词” UNK。
The correspondence from ids to tokens is stored in the vocab attribute of our datasets, in a dictionary called itos (for int to string).
id和标准词的关系存储在数据集的vocab属性中,在字典 itos 中(由int类型转换成string类型)。
End of explanation
"""
data.train_ds[0][0]
"""
Explanation: And if we look at what's in our datasets, we'll see the tokenized text as a representation:
如果我们查看数据集里的内容,我们会看到如下经过分词后的文本:
End of explanation
"""
data.train_ds[0][0].data[:10]
"""
Explanation: But the underlying data is all numbers
但实际上,底层的数据形式都是数字
End of explanation
"""
data = (TextList.from_csv(path, 'texts.csv', cols='text')
.split_from_df(col=2)
.label_from_df(cols=0)
.databunch())
"""
Explanation: With the data block API 用data block API处理文字
We can use the data block API with NLP and have a lot more flexibility than what the default factory methods offer. In the previous example for instance, the data was randomly split between train and validation instead of reading the third column of the csv.
在NLP中,我们可以使用data block API,它比默认的工厂方法更灵活,能够更好地处理各种情况。比如在之前的例子中,数据随机分为训练集和验证集,而非通过读取csv中第三列的标签来分组。
With the data block API though, we have to manually call the tokenize and numericalize steps. This allows more flexibility, and if you're not using the defaults from fastai, the various arguments to pass will appear in the step they're revelant, so it'll be more readable.
不过如果要使用数据块API,我们需要手动完成分词和数值化的各个步骤。这样可以更加灵活。如果你没有使用fastai工具包里的默认设置,你也可以像下面的步骤一样进行各种设置,并且代码可读性也更高。
End of explanation
"""
bs=48
"""
Explanation: Language model 语言模型
Note that language models can use a lot of GPU, so you may need to decrease batchsize here.
需要注意的是语言文字模型会用掉许多GPU,因此你可能会需要减小每个批次的样本容量。
End of explanation
"""
path = untar_data(URLs.IMDB)
path.ls()
(path/'train').ls()
"""
Explanation: Now let's grab the full dataset for what follows.
现在我们为接下来的步骤获取完整的数据集。
End of explanation
"""
data_lm = (TextList.from_folder(path)
#Inputs: all the text files in path
.filter_by_folder(include=['train', 'test', 'unsup'])
#We may have other temp folders that contain text files so we only keep what's in train and test
.split_by_rand_pct(0.1)
#We randomly split and keep 10% (10,000 reviews) for validation
.label_for_lm()
#We want to do a language model so we label accordingly
.databunch(bs=bs))
data_lm.save('data_lm.pkl')
"""
Explanation: The reviews are in a training and test set following an imagenet structure. The only difference is that there is an unsup folder on top of train and test that contains the unlabelled data.
现在影评遵循imagenet的结构分到了训练集和测试集中。唯一的区别是,在测试集和训练集上会有个包括未标记数据的unsup文件夹。
We're not going to train a model that classifies the reviews from scratch. Like in computer vision, we'll use a model pretrained on a bigger dataset (a cleaned subset of wikipedia called wikitext-103). That model has been trained to guess what the next word is, its input being all the previous words. It has a recurrent structure and a hidden state that is updated each time it sees a new word. This hidden state thus contains information about the sentence up to that point.
我们不需要从无到有地训练一个影评分类模型。就像计算机视觉模型一样,我们将使用一个在更大训练集上预训练好的模型(在维基上有一个清洗好的子集 wikitext-103 )。这个模型被训练来猜测下一个词是什么,它的输入数据是之前已有的词汇。该模型采用循环神经网络结构,并且有一个每次看到新词都会更新的隐层状态。 隐层状态里包含的信息,是文本中到截止这个点之前的所有句子。
We are going to use that 'knowledge' of the English language to build our classifier, but first, like for computer vision, we need to fine-tune the pretrained model to our particular dataset. Because the English of the reviews left by people on IMDB isn't the same as the English of wikipedia, we'll need to adjust the parameters of our model by a little bit. Plus there might be some words that would be extremely common in the reviews dataset but would be barely present in wikipedia, and therefore might not be part of the vocabulary the model was trained on.
我们用这样的预训练模型信息来创建我们的分类器。但首先,正如计算机视觉一样,我们需要对预训练的模型进行调参来适应我们的这个数据集。由于IMDB上影评的英语语言和维基百科上的英语语言风格不尽相同,我们需要将参数进行一定的调整。另外,可能会有些词在影评数据中出现的频率较高,但在维基百科上基本没出现过,因此可能和模型预训练时用的词库不太一样。
This is where the unlabelled data is going to be useful to us, as we can use it to fine-tune our model. Let's create our data object with the data block API (next line takes a few minutes).
我们可以用未标记的数据进行模型微调,这就是未标记数据具有价值的地方。让我们通过数据块API来建立一个数据对象。(下行会花费数分钟的时间)
End of explanation
"""
data_lm = load_data(path, 'data_lm.pkl', bs=bs)
data_lm.show_batch()
"""
Explanation: We have to use a special kind of TextDataBunch for the language model, that ignores the labels (that's why we put 0 everywhere), will shuffle the texts at each epoch before concatenating them all together (only for training, we don't shuffle for the validation set) and will send batches that read that text in order with targets that are the next word in the sentence.
对于语言模型,我们需要用一个特殊的TextDataBunch,它会忽略标签(这就是为什么我们给所有地方都设置为0的原因),在将每个轮次的文字合并在一起之前打乱所有的文字(仅限于模型训练,我们不会对验证集进行混洗),并会分批次按顺序读取文字和接下来对应的单词。
The line before being a bit long, we want to load quickly the final ids by using the following cell.
之前的代码会有点长,我们可以用下面的代码用id快速导入对应的文字。
End of explanation
"""
learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3)
learn.lr_find()
learn.recorder.plot(skip_end=15)
learn.fit_one_cycle(1, 1e-2, moms=(0.8,0.7))
learn.save('fit_head')
learn.load('fit_head');
"""
Explanation: We can then put this in a learner object very easily with a model loaded with the pretrained weights. They'll be downloaded the first time you'll execute the following line and stored in ~/.fastai/models/ (or elsewhere if you specified different paths in your config file).
我们可以很轻易地将模型和预训练的权重结合为一个学习器对象。在你第一次运行下面的代码时,所有模型的信息会下载并存储到~/.fastai/models/ 或者其他由你的config文件指定的地方。
End of explanation
"""
learn.unfreeze()
learn.fit_one_cycle(10, 1e-3, moms=(0.8,0.7))
learn.save('fine_tuned')
"""
Explanation: To complete the fine-tuning, we can then unfreeze and launch a new training.
要完成微调,我们可以解冻模型并开启新的训练。
End of explanation
"""
learn.load('fine_tuned');
TEXT = "I liked this movie because"
N_WORDS = 40
N_SENTENCES = 2
print("\n".join(learn.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES)))
"""
Explanation: How good is our model? Well let's try to see what it predicts after a few given words.
我们的模型表现怎么样呢? 嗯,让我们来看看在几个词过后模型预测出的词是怎样的。
End of explanation
"""
learn.save_encoder('fine_tuned_enc')
"""
Explanation: We have to save not only the model, but also its encoder, the part that's responsible for creating and updating the hidden state. For the next part, we don't care about the part that tries to guess the next word.
我们不但保存了模型,而且保存了它的编码器,(也就是)负责创建和更新隐层状态(的部分)。剩下的负责猜词的部分,我们就不管了。
End of explanation
"""
path = untar_data(URLs.IMDB)
data_clas = (TextList.from_folder(path, vocab=data_lm.vocab)
#grab all the text files in path
.split_by_folder(valid='test')
#split by train and valid folder (that only keeps 'train' and 'test' so no need to filter)
.label_from_folder(classes=['neg', 'pos'])
#label them all with their folders
.databunch(bs=bs))
data_clas.save('data_clas.pkl')
data_clas = load_data(path, 'data_clas.pkl', bs=bs)
data_clas.show_batch()
"""
Explanation: Classifier 分类器
Now, we'll create a new data object that only grabs the labelled data and keeps those labels. Again, this line takes a bit of time.
现在我们要创建一个新的数据对象,仅抓取有标签的数据并且保留标签。这个步骤可能会需要一点时间。
End of explanation
"""
learn = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5)
learn.load_encoder('fine_tuned_enc')
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(1, 2e-2, moms=(0.8,0.7))
learn.save('first')
learn.load('first');
learn.freeze_to(-2)
learn.fit_one_cycle(1, slice(1e-2/(2.6**4),1e-2), moms=(0.8,0.7))
learn.save('second')
learn.load('second');
learn.freeze_to(-3)
learn.fit_one_cycle(1, slice(5e-3/(2.6**4),5e-3), moms=(0.8,0.7))
learn.save('third')
learn.load('third');
learn.unfreeze()
learn.fit_one_cycle(2, slice(1e-3/(2.6**4),1e-3), moms=(0.8,0.7))
learn.predict("I really loved that movie, it was awesome!")
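# A quick sanity check (an illustration with a made-up sentence): the same predict()
# call should assign a clearly negative review to the 'neg' class.
learn.predict("That was the most boring two hours I have spent in a long time.")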
"""
Explanation: We can then create a model to classify those reviews and load the encoder we saved before.
我们可以建立一个模型来对影评进行分类,并且导入之前存储好的编码器。
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive/01_bigquery/b_bqml.ipynb | apache-2.0 | PROJECT = "cloud-training-demos" # Replace with your PROJECT
REGION = "us-central1" # Choose an available region for Cloud MLE
import os
os.environ["PROJECT"] = PROJECT
os.environ["REGION"] = REGION
!pip freeze | grep google-cloud-bigquery==1.21.0 || pip install google-cloud-bigquery==1.21.0
%load_ext google.cloud.bigquery
"""
Explanation: Big Query Machine Learning (BQML)
Learning Objectives
- Understand that it is possible to build ML models in Big Query
- Understand when this is appropriate
- Experience building a model using BQML
Introduction
BigQuery is more than just a data warehouse, it also has some ML capabilities baked into it.
As of January 2019 it is limited to linear models, but what it gives up in complexity, it gains in ease of use.
BQML is a great option when a linear model will suffice, or when you want a quick benchmark to beat, but for more complex models such as neural networks you will need to pull the data out of BigQuery and into an ML Framework like TensorFlow.
In this notebook, we will build a naive model using BQML. This notebook is intended to inspire usage of BQML, we will not focus on model performance.
Set up environment variables and load necessary libraries
End of explanation
"""
from google.cloud import bigquery
bq = bigquery.Client(project = PROJECT)
dataset = bigquery.Dataset(bq.dataset("bqml_taxifare"))
try:
bq.create_dataset(dataset) # will fail if dataset already exists
print("Dataset created")
except:
print("Dataset already exists")
"""
Explanation: Create BigQuery dataset
Prior to now we've just been reading an existing BigQuery table, now we're going to create our own, so we need some place to put it. In BigQuery parlance, Dataset means a folder for tables.
We will take advantage of BigQuery's Python Client to create the dataset.
End of explanation
"""
%%bigquery --project $PROJECT
CREATE or REPLACE MODEL bqml_taxifare.taxifare_model
OPTIONS(model_type = "linear_reg",
input_label_cols = ["label"]) AS
-- query to fetch training data
SELECT
(tolls_amount + fare_amount) AS label,
pickup_datetime,
pickup_longitude,
pickup_latitude,
dropoff_longitude,
dropoff_latitude
FROM
`nyc-tlc.yellow.trips`
WHERE
-- Clean Data
trip_distance > 0
AND passenger_count > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
-- repeatable 1/5000th sample
AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 5000)) = 1
"""
Explanation: Create model
To create a model (documentation)
1. Use CREATE MODEL and provide a destination table for resulting model. Alternatively we can use CREATE OR REPLACE MODEL which allows overwriting an existing model.
2. Use OPTIONS to specify the model type (linear_reg or logistic_reg). There are many more options we could specify, such as regularization and learning rate, but we'll accept the defaults.
3. Provide the query which fetches the training data
Have a look at Step Two of this tutorial to see another example.
The query will take about two minutes to complete
End of explanation
"""
%%bigquery --project $PROJECT
SELECT
*
FROM
ML.TRAINING_INFO(MODEL `bqml_taxifare.taxifare_model`)
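%%bigquery --project $PROJECT
-- A sketch for completeness (not part of the original notebook): ML.EVALUATE reports
-- metrics such as mean_squared_error for the trained model. Called without an
-- evaluation query it uses the data seen at training time, which is only illustrative.
SELECT
  *
FROM
  ML.EVALUATE(MODEL `bqml_taxifare.taxifare_model`)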
"""
Explanation: Get training statistics
Because the query uses a CREATE MODEL statement to create a table, you do not see query results. The output is an empty string.
To get the training results we use the ML.TRAINING_INFO function.
Have a look at Step Three and Four of this tutorial to see a similar example.
End of explanation
"""
%%bigquery --project $PROJECT
#standardSQL
SELECT
predicted_label
FROM
ML.PREDICT(MODEL `bqml_taxifare.taxifare_model`,
(
SELECT
TIMESTAMP "2014-01-03 10:00:00" as pickup_datetime,
-74.0080 as pickup_longitude,
40.7434 as pickup_latitude,
-73.7781 as dropoff_longitude,
40.6413 as dropoff_latitude
))
"""
Explanation: 'eval_loss' is reported as mean squared error, so our RMSE is 8.29. Your results may vary.
Predict
To use our model to make predictions, we use ML.PREDICT. Let's use the taxifare_model you trained above to infer the cost of a taxi ride that occurs at 10:00 am on January 3rd, 2014 going from the Google Office in New York (latitude: 40.7434, longitude: -74.0080) to the JFK airport (latitude: 40.6413, longitude: -73.7781)
Have a look at Step Five of this tutorial to see another example.
End of explanation
"""
|
agile-geoscience/xlines | notebooks/03_AVO_plot.ipynb | apache-2.0 | import numpy as np
import matplotlib.pyplot as plt
"""
Explanation: x lines of Python
Amplitude-vs-offset plot
This notebook accompanies a blog post at Agile*.
In the first x lines we made a 2D synthetic seismogram. A major simplification in that model was normal incidence at 0 degrees of offset: the ray of seismic energy was assumed to be perfectly vertical. In this notebook, we'll model non-vertical incidence.
When the reflection is not perpendicular to the geological interface, we have to use the Zoeppritz equation, or simplifications of it, to model the angle-dependent reflectivity. Agile's library bruges (Bag of Really Useful Geophysical Equations and Stuff) has lots of different reflectivity formulations to compare; we'll look at three.
The data is from Blangy, JP, 1994, AVO in transversely isotropic media—An overview. Geophysics 59 (5), 775–781. Blangy conveniently defined his model rocks very fully and clearly (take note, would-be authors!). Related blog post: The Blangy equation
Before we start, the usual prelims:
End of explanation
"""
# Upper layer: shale.
vp0, vs0, ρ0, δ0, ε0 = 2307, 1108, 2150, 0.15, 0.30 # Line 1
# Lower layer: wet sand.
vp1, vs1, ρ1, δ1, ε1 = 1951, 930, 2200, 0.00, 0.00 # Line 2
# Lower layer: gas sand.
vp1g, vs1g, ρ1g, δ1g, ε1g = 1951, 1301, 1950, 0.00, 0.00 # Line 3
"""
Explanation: We'll initiate some variables with some of Blangy's test data: the Type 3 AVO rocks from his Table 1. We only need the acoustic properties at first, but we'll define the elastic and anisotropic parameters as well, just in case we need them later (we will!).
End of explanation
"""
fig = plt.figure(figsize=(12, 3))
z = np.arange(0, 20)
ax0 = fig.add_subplot(1, 3, 1)
ax0.plot(np.append(np.repeat(vp0, 10), np.repeat(vp1, 10)), z, 'ob', markeredgecolor='none')
ax0.plot(np.append(np.repeat(vp0, 10), np.repeat(vp1g, 10)), z, 'ob', alpha=0.4, markeredgecolor='none')
ax0.set_xlim(1900, 2400)
ax0.axhline(9.5, c='k')
ax0.invert_yaxis()
ax0.set_title('Vp')
ax0.text(2000, 5, 'SHALE')
ax0.text(2200, 15, 'SANDSTONE')
ax0.text(1980, 13, 'wet')
ax0.text(1980, 17, 'gas', alpha=0.4)
ax1 = fig.add_subplot(1, 3, 2)
ax1.plot(np.append(np.repeat(vs0, 10), np.repeat(vs1, 10)), z, 'og', markeredgecolor='none')
ax1.plot(np.append(np.repeat(vs0, 10), np.repeat(vs1g, 10)), z, 'og', alpha=0.4, markeredgecolor='none')
ax1.set_xlim(850, 1350)
ax1.axhline(9.5, c='k')
ax1.invert_yaxis()
ax1.set_title('Vs')
ax1.text(950, 5, 'SHALE')
ax1.text(1050, 15, 'SANDSTONE')
ax1.text(950, 13, 'wet')
ax1.text(1220, 17, 'gas', alpha=0.4)
ax2 = fig.add_subplot(1, 3, 3)
ax2.plot(np.append(np.repeat(ρ0, 10), np.repeat(ρ1, 10)), z, 'or', markeredgecolor='none')
ax2.plot(np.append(np.repeat(ρ0, 10), np.repeat(ρ1g, 10)), z, 'or', alpha=0.4, markeredgecolor='none')
ax2.set_xlim(1800, 2500)
ax2.axhline(9.5, c='k')
ax2.invert_yaxis()
ax2.set_title('rho')
ax2.text(1900, 5, 'SHALE')
ax2.text(2250, 15, 'SANDSTONE')
ax2.text(2100, 13, 'wet')
ax2.text(2000, 17, 'gas', alpha=0.4)
plt.show()
"""
Explanation: For peace of mind, or just for fun, we can make a plot of these properties.
End of explanation
"""
# I'm on a tight line budget so I'm defining a function on a
# single line. Don't do this, it makes your code less readable.
def dom(upper, lower): return np.subtract(lower, upper) / np.mean((lower, upper))
"""
Explanation: Linear Shuey equation
Let's start with a bit of maths — the 2-term Shuey approximation. I'm using the formulation given by Avseth, P, T Mukerji and G Mavko (2005). Quantitative seismic interpretation. Cambridge University Press, Cambridge, UK.
$$R(\theta) \approx R(0) + G \sin^2 \theta$$
where
$$R(0) = \frac{1}{2} \left( \frac{\Delta V_\mathrm{P}}{V_\mathrm{P}} + \frac{\Delta \rho}{\rho} \right)$$
and
$$G = \frac{1}{2} \frac{\Delta V_\mathrm{P}}{V_\mathrm{P}} - 2 \frac{V^2_\mathrm{S}}{V^2_\mathrm{P}} \left( \frac{\Delta \rho}{\rho} + 2 \frac{\Delta V_\mathrm{S}}{V_\mathrm{S}} \right)$$
In these equations, $\Delta V_\mathrm{P}$ means the difference in the velocity of the two layers, and $V_\mathrm{P}$ means the mean of the two layers. Let's make a function to help with this 'difference over mean':
End of explanation
"""
R0 = 0.5 * (dom(vp0, vp1) + dom(ρ0, ρ1))
R0
"""
Explanation: First term:
End of explanation
"""
import bruges
# I have to use 31 because `arange()` goes up to but not including.
θ = range(0, 31) # Line 4
shuey = bruges.reflection.shuey2(vp0, vs0, ρ0, # Line 5
vp1, vs1, ρ1,
θ)
"""
Explanation: OK, that looks reasonable, but the second term $G$ is going to take some really fiddly math... and I'm on a budget, I don't have enough lines for all that. Besides, I might easily make a mistake.
Luckily, our library bruges has all these equations. There's a bruges.reflection.shuey2 function that returns the 2-term Shuey reflectivity for a given interface and angle range.
End of explanation
"""
shuey
shuey_g = bruges.reflection.shuey2(vp0, vs0, ρ0, # Line 6
vp1g, vs1g, ρ1g,
θ)
plt.plot(shuey, label='Brine case') # Line 7
plt.plot(shuey_g, 'red', label='Gas case') # Line 8
plt.legend(loc='best'); # Line 9
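# A cross-check by hand (a sketch; bruges' exact formulation may differ slightly, so
# small numerical differences would not be alarming): build the two-term Shuey response
# from R(0) and G as written above, using the dom() helper, and compare with `shuey`.
vp_mean, vs_mean = np.mean((vp0, vp1)), np.mean((vs0, vs1))
G = 0.5 * dom(vp0, vp1) - 2 * (vs_mean**2 / vp_mean**2) * (dom(ρ0, ρ1) + 2 * dom(vs0, vs1))
shuey_manual = R0 + G * np.sin(np.radians(θ))**2
print('Max difference from bruges:', np.max(np.abs(shuey_manual - shuey)))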
"""
Explanation: I now have an array of the reflection coefficients corresponding to the angles I passed in (0 to 30 degrees of offset).
End of explanation
"""
plt.axhline(0, color='k', alpha=0.3)
plt.plot(θ, shuey, 'b', lw=2, label='Brine case')
plt.plot(θ, shuey_g, 'r', lw=2, label='Gas case')
plt.axhline(0, color='k', alpha=0.4)
plt.ylim(-0.25, 0.1)
plt.xlabel('theta [deg]')
plt.ylabel('reflectivity [unitless]')
plt.legend(loc='best')
plt.show()
"""
Explanation: That's the 10 lines of Python used up, but we already got a useful plot.
With a few more lines, we can make the plot a bit prettier.
End of explanation
"""
θ = np.arange(0, 51)
shuey = bruges.reflection.shuey2(vp0, vs0, ρ0,
vp1, vs1, ρ1,
θ)
zoeppritz = bruges.reflection.zoeppritz_rpp(vp0, vs0, ρ0,
vp1, vs1, ρ1,
θ)
akirichards = bruges.reflection.akirichards(vp0, vs0, ρ0,
vp1, vs1, ρ1,
θ)
plt.plot(shuey, label='Shuey')
plt.plot(zoeppritz, 'r', lw=2, label='Zoeppritz')
plt.plot(akirichards, label='Aki-Richards')
plt.axhline(0, color='k', alpha=0.4)
plt.xlabel('theta [deg]')
plt.ylabel('reflectivity [unitless]')
plt.legend(loc='best')
plt.show()
"""
Explanation: Compare to Zoeppritz and Aki-Richards
We could also replace that 2-term Shuey evaluation with another algorithm. For example, let's compute the Aki-Richards approximation and the full Zoeppritz solution, and compare the three results.
First, we'll make a wider angle range, so we can compare them outside the reliability 'window' of Shuey's approximation (up to about 25 degrees or so for most interfaces).
End of explanation
"""
# The function returns the isotropic and the anisotropic responses.
# Since we don't need the isotropic response (it's the same as the
# Aki-Richards solution), we can assign it to _, a sort of throwaway.
_, blangy = bruges.rockphysics.blangy(vp0, vs0, ρ0, δ0, ε0,
vp1, vs1, ρ1, δ1, ε1,
θ)
plt.plot(akirichards, label='Aki-Richards')
plt.plot(blangy, label='Blangy')
plt.axhline(0, color='k', alpha=0.4)
plt.legend(loc='best')
plt.show()
"""
Explanation: You can see how Shuey breaks down at about 25 degrees, whereas Aki-Richards is quite reliable even to wide offsets.
Isotropic and anisotropic approximations
We can go further still. Blangy's paper gives us an AVO approximation for weakly anisotropic rocks. We can use another bruges function to compute that response.
End of explanation
"""
|
calee0219/Course | DM/DataMining/hw1.ipynb | mit | import pandas as pd
import datetime
df = pd.read_csv('201707-citibike-tripdata.csv')
df.columns = ['tripduration','starttime','stoptime',\
'start_station_id','start_station_name','start_station_latitude','start_station_longitude',\
'end_station_id','end_station_name','end_station_latitude','end_station_longitude',\
'bikeid','usertype','birth_year','gender']
"""
Explanation: 2017 NCTU Data Mining HW1
0416037 李家安
Info
Group 3
Dataset: New York Citi Bike Trip Histories, first data
Task
What rules should be discovered?
Need
what is a transaction
what rules should be discovered(and discretization method)
what algorithm you use(Apriori or FP-growth or something else)
a. algorithm code from github is allowed(cite the repository)
top 3 rules
what did you learn, or a comparison between the different methods you used
Data Preprocessing
Since the data preprocessing I did in hw0 was a bit lacking, I have revised it a little here.
I also want to load the data into MySQL, which makes some of the usual SQL queries more convenient.
So this part only does the processing and mostly does not print the intermediate results.
Load Data
End of explanation
"""
from sqlalchemy import create_engine
engine = create_engine('mysql://calee0219:110010@localhost/citybike')
"""
Explanation: Connect sql
End of explanation
"""
print(df.isnull().sum().sum())
print(pd.isnull(df).sum() > 0)
birth_mean = df['birth_year'].mean()
df = df.fillna(birth_mean)
"""
Explanation: Remove NAN
Inspection shows that only birth_year contains NaN values, so we replace the missing entries with the column mean.
End of explanation
"""
df = df.drop(df.index[df['starttime'] >= df['stoptime']])
df = df.reset_index(drop=True)
"""
Explanation: Drop records where start time >= end time
End of explanation
"""
import datetime
import operator
from pyproj import Geod
wgs84_geod = Geod(ellps='WGS84')
start = [datetime.datetime.strptime(dt, '%Y-%m-%d %H:%M:%S') for dt in df['starttime'].tolist()]
end = [datetime.datetime.strptime(dt, '%Y-%m-%d %H:%M:%S') for dt in df['stoptime'].tolist()]
def Distance(lat1,lon1,lat2,lon2):
az12,az21,dist = wgs84_geod.inv(lon1,lat1,lon2,lat2)
return dist
dist = Distance(df['start_station_latitude'].tolist(), df['start_station_longitude'].tolist(), \
df['end_station_latitude'].tolist(), df['end_station_longitude'].tolist())
speed = list(map(operator.truediv, [x/1000 for x in dist], [ time.seconds/3600 for time in list(map(operator.sub, end, start))]))
zp = list(zip(speed,list(range(df.shape[0]))))
zp.sort()
zp.reverse()
for i in zp[:6]:
print(i)
df = df.drop(df.index[[716622,320615,1393557,1260345]])
df.reset_index(drop=True, inplace=True)
"""
Explanation: Remove trips with unreasonably high speeds
A quick search suggests a bicycle tops out at roughly 40 km/h, so I drop the rows above that speed.
End of explanation
"""
from sqlalchemy import types
try:
df = pd.read_sql_table(table_name='origin', con=engine)
except:
df['tripduration'].astype(int)
df['starttime'] = pd.to_datetime(df['starttime'])
df['stoptime'] = pd.to_datetime(df['stoptime'])
df['start_station_id'].astype(int)
df['start_station_name'].astype(str)
df['start_station_latitude'].astype(float)
df['start_station_longitude'].astype(float)
df['end_station_id'].astype(int)
df['end_station_name'].astype(str)
df['end_station_latitude'].astype(float)
df['end_station_longitude'].astype(float)
df['bikeid'].astype(int)
df['usertype'].astype(str)
df['birth_year'].astype(int)
df['gender'].astype(int)
df.to_sql(name='origin', con=engine, if_exists='replace',index=False,\
dtype={'starttime': types.DATETIME, 'stoptime': types.DATETIME, 'birth_year': types.BIGINT})
"""
Explanation: Save a copy of the cleaned original table first
Casting the columns to proper types along the way
End of explanation
"""
try:
station = pd.read_sql_table(table_name='station', con=engine)
except:
station = pd.DataFrame(df[['start_station_id', 'start_station_name', 'start_station_latitude', 'start_station_longitude']])
station.columns = ['id', 'name', 'latitude', 'longitude']
tmp = pd.DataFrame(df[['end_station_id', 'end_station_name', 'end_station_latitude', 'end_station_longitude']])
tmp.columns = ['id', 'name', 'latitude', 'longitude']
station = pd.concat([station, tmp])
station = station.sort_values('id').drop_duplicates().reset_index(drop=True)
station.to_sql(name='station', con=engine, if_exists='fail',index=False)
"""
Explanation: Slice a station-only table out of the original data
Keep only id, name, lat, lng, and drop duplicates
End of explanation
"""
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
my_map = Basemap(projection='merc', lat_0=40.7, lon_0=-73.98,
resolution = 'h', area_thresh = 0.01,
llcrnrlon=-74.1, llcrnrlat=40.64,
urcrnrlon=-73.9, urcrnrlat=40.85)
lon = station['longitude'].tolist()
lat = station['latitude'].tolist()
labels = station['id'].tolist()
fig = plt.figure(frameon=False)
fig.set_size_inches(18,12)
my_map.drawcoastlines()
my_map.drawcountries()
my_map.fillcontinents(color='coral')
my_map.drawmapboundary()
x,y = my_map(lon, lat)
my_map.plot(x, y, 'bo', markersize=2)
plt.show()
"""
Explanation: Check whether any station location looks wrong
It does not appear so.
End of explanation
"""
from sqlalchemy import types
try:
path = pd.read_sql_table(table_name='path', con=engine)
except:
path = df.drop(['start_station_name', 'start_station_latitude', 'start_station_longitude', 'end_station_name', 'end_station_latitude', 'end_station_longitude'], axis=1)
path.to_csv('path.csv', index=False)
path.to_sql(name='path', con=engine, if_exists='fail',index=False,\
dtype={'starttime': types.DATETIME, 'stoptime': types.DATETIME, 'birth_year': types.BIGINT})
"""
Explanation: Extract the path table
Keep only tripduration, starttime, stoptime, start station id, end station id, bikeid, usertype, birth year and gender.
End of explanation
"""
import bisect
import datetime
try:
in_out = pd.read_sql_table(table_name='in_out', con=engine)
except:
begin = datetime.datetime(2017, 7, 1, 0, 0, 0)
end = datetime.datetime(2017, 8, 1, 23, 30, 0)
date_list = [ end - datetime.timedelta(seconds=x*60*30) for x in range(0, 1536)][::-1]
table = {}
for idx, row in path.iterrows():
start_date = row['starttime']
start = date_list[bisect.bisect_right(date_list, start_date)]
end_date = row['stoptime']
end = date_list[bisect.bisect_right(date_list, end_date)]
start_tmp = (row['start_station_id'], start)
if table.get(start_tmp) == None:
table[start_tmp] = (1,0)
else:
tmp = list(table[start_tmp])
tmp[0] += 1
table[start_tmp] = tuple(tmp)
        stop_tmp = (row['end_station_id'], end)  # bucket the arrival by its stop-time interval
if table.get(stop_tmp) == None:
table[stop_tmp] = (0,1)
else:
tmp = list(table[stop_tmp])
tmp[1] += 1
table[stop_tmp] = tuple(tmp)
tmp_in_out = []
for key in table.keys():
tmp_in_out.append([key[0], key[1], table[key][0], table[key][1]])
in_out = pd.DataFrame(tmp_in_out, columns=['id', 'time', 'in', 'out'])
in_out.to_sql(name='in_out', con=engine, if_exists='replace',index=False,\
dtype={'time': types.DATETIME})
"""
Explanation: Build the in/out flow table
To make querying easier, keep id, time, in-flow and out-flow per 30-minute interval.
End of explanation
"""
import pandas as pd
from mlxtend.preprocessing import OnehotTransactions
from mlxtend.frequent_patterns import apriori
transactions = []
for idx, row in in_out.iterrows():
if row['id'] == 519:
transactions.append([('in',row['in']//10), ('out',row['out']//10)])
min_sup = 0.01
oht = OnehotTransactions()
oht_ary = oht.fit(transactions).transform(transactions)
df = pd.DataFrame(oht_ary, columns=oht.columns_)
frequent_itemsets = apriori(df, min_support=min_sup, use_colnames=True)
frequent_itemsets['length'] = frequent_itemsets['itemsets'].apply(lambda x: len(x))
fqs = frequent_itemsets[ (frequent_itemsets['length'] >= 2) &
(frequent_itemsets['support'] >= min_sup) ].sort_values(['support'], ascending=False)
print(fqs)
for idx, row in fqs.iterrows():
cof = row['itemsets'][0]
import Orange
from orangecontrib.associate.fpgrowth import *
transactions = []
for idx, row in in_out.iterrows():
if row['id'] == 519:
transactions.append([('in',row['in']//10), ('out',row['out']//10)])
#transactions = np.array(transactions)
import pyfpgrowth
patterns = pyfpgrowth.find_frequent_patterns(transactions, 10)
print(patterns)
rules = pyfpgrowth.generate_association_rules(patterns, 0.3)
for key in rules.keys():
print(key, rules[key])
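# Illustrative aside (not part of the original analysis): what
# generate_association_rules() reports is conf(A -> B) = support(A u B) / support(A).
# Here this is computed by hand for one candidate rule from the `patterns` dict above;
# the keys are only examples and are looked up defensively in case pyfpgrowth did not
# find them frequent.
antecedent = (('in', 0),)
both = tuple(sorted(antecedent + (('out', 0),)))
if antecedent in patterns and both in patterns:
    print('conf((in,0) -> (out,0)) =', patterns[both] / patterns[antecedent])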
"""
Explanation: Task
Transactions 1
The transactions provided by the TA:
in / out flow when station_id=519
Following the TA's approach, I bin the in/out counts into groups of 10.
End of explanation
"""
query = "SELECT in_out.id, in_out.time, in_out.in, in_out.out, T.latitude, T.longitude FROM in_out left join ( SELECT id, latitude, longitude from station )T ON T.id = in_out.id ORDER BY id"
table = pd.read_sql_query(query, engine)
lat_mean = station['latitude'].mean()
lon_mean = station['longitude'].mean()
#print(lat_mean, lon_mean)
def Distance(lat1,lon1,lat2,lon2):
az12,az21,dist = wgs84_geod.inv(lon1,lat1,lon2,lat2)
return dist
from orangecontrib.associate.fpgrowth import *
rem = {}
for idx, row in station.iterrows():
rem[row['id']] = Distance(lat_mean, lon_mean, row['latitude'], row['longitude'])//1000 # 以公里為單位
from fp_growth import *
transactions = []
for idx, row in table.iterrows():
rin = row['in'] // 10
rout = row['out'] // 10
if rin == 0 or rout == 0: continue
transactions.append([(rem[row['id']], row['time'].time().isoformat()), ('in',rin), ('out',rout)])
result = {}
for itemset, support in find_frequent_itemsets(transactions, .02*len(transactions), True):
result[tuple(itemset)] = support/len(transactions)
def subs(l):
assert type(l) is list
if len(l) == 1:
return [l]
x = subs(l[1:])
return x + [[l[0]] + y for y in x]
def assRule(freq, min_conf = 0.6):
assert type(freq) is dict
result = []
for item, sup in freq.items():
for subitem in subs(list(item)):
sb = [x for x in item if x not in subitem]
if sb == [] or subitem == []: continue
if len(subitem) == 1 and (subitem[0][0] == 'in' or subitem[0][0] == 'out'):
continue
conf = sup/freq[tuple(subitem)]
if conf >= min_conf:
result.append({'from':subitem, 'to':sb, 'sup':sup, 'conf':conf})
return result
rules = assRule(result, 0.8)
#print(rules)
for ru in rules:
print(ru)
"""
Explanation: My transactions use:
As in the data provided by the TA, the in-flow and out-flow of station_id=519.
Rules to be found / discretization method
As the TA suggested, the in/out flow is binned into groups of 10 counts.
Algorithm
As shown above, Apriori and FP-growth are each used to find the frequent itemsets.
top 3 rules
('out', 0) -> ('in', 0)
('in', 0) -> ('out', 0)
('out', 3) -> ('in', 3)
What did I learn
The FP-growth run does list association rules, but they show that in-flow = 0 and out-flow = 0 dominate by far.
I therefore think the itemsets of station id = 519 contain essentially no useful association rules.
The in/out flows do, however, seem to have the same order of magnitude within the same time interval.
Transaction 2
I want to find a relationship between the distance to the city center, the time of day and the in/out flow.
To avoid the frequent-itemset search being too slow, FP-growth is used from here on.
End of explanation
"""
query = '''
SELECT in_out.id, in_out.time, in_out.in, in_out.out, T1.st_time, T2.en_time
FROM in_out
LEFT JOIN (
SELECT start_station_id AS st_id, SEC_TO_TIME(AVG(TIME_TO_SEC(DATE_FORMAT(starttime, "%%H:%%i:%%s")))) AS st_time
FROM path
GROUP BY start_station_id
)T1 ON in_out.id = T1.st_id
LEFT JOIN (
SELECT end_station_id AS en_id, SEC_TO_TIME(AVG(TIME_TO_SEC(DATE_FORMAT(stoptime, "%%H:%%i:%%s")))) AS en_time
FROM path
GROUP BY end_station_id
)T2 ON in_out.id = T2.en_id
ORDER BY in_out.id;
'''
table = pd.read_sql_query(query, engine)
transactions = []
for idx, row in table.iterrows():
rin = row['in'] // 10
rout = row['out'] // 10
if rin == 0 or rout == 0: continue
st = (datetime.datetime.min+row['st_time']).time().replace(second=0, microsecond=0)
st = st.replace(minute=st.minute//10 * 10).isoformat()
en = (datetime.datetime.min+row['en_time']).time().replace(second=0, microsecond=0)
en = en.replace(minute=en.minute//10 * 10).isoformat()
transactions.append([('stime', st), ('etime', en), ('in',rin), ('out',rout)])
result = {}
for itemset, support in find_frequent_itemsets(transactions, .04*len(transactions), True):
result[tuple(itemset)] = support/len(transactions)
def subs(l):
assert type(l) is list
if len(l) == 1:
return [l]
x = subs(l[1:])
return x + [[l[0]] + y for y in x]
def assRule(freq, min_conf = 0.6):
assert type(freq) is dict
result = []
for item, sup in freq.items():
for subitem in subs(list(item)):
sb = [x for x in item if x not in subitem]
if sb == [] or subitem == []: continue
if len(subitem) == 1 and (subitem[0][0] == 'in' or subitem[0][0] == 'out'):
continue
conf = sup/freq[tuple(subitem)]
if conf >= min_conf:
result.append({'from':subitem, 'to':sb, 'sup':sup, 'conf':conf})
return result
rules = assRule(result, 0.9)
#print(rules)
for ru in rules:
print(ru)
"""
Explanation: My transactions use:
the distance to the city center (km)
the time, binned into 30-minute intervals
the in-flow
the out-flow
Rules to be found / discretization method
I want to find a relationship between the distance to the center, the time of day and the in/out flow, for example:
By putting the distance to the center and the time together, I hope to obtain rules in which the two appear jointly.
Perhaps within a certain distance band around the center a characteristic in/out flow pattern can be found.
discretization method
distance rounded down to whole kilometres
time cut every 30 minutes, i.e. 24 hours split into 48 segments
in/out flow: binned into groups of 10 counts
Algorithm
The algorithm used here is FP-growth.
top 3 rules
Ranked by confidence, they should be:
('in', 1), (1, 18:30) -> ('out', 1)
(1, 19:00) -> ('out', 1)
(1, 18:00) -> ('out', 1)
What did I learn
Basically, stations 1-2 km from the city center have in/out flows on the order of 10-20 between 18:00 and 19:00 in the evening.
Transaction 3
I want to see whether there is any hidden relationship between start time, end time, in-flow, out-flow, speed and distance. One possible way to assemble such itemsets is sketched in the code below.
End of explanation
"""
|
ScienceStacks/jupyter_scisheets_widget | test_notebooks/20171005_notebook_narrative_scisheets_widget.ipynb | bsd-3-clause | import json
import numpy as np
import pandas as pd
from jupyter_scisheets_widget import scisheets_widget
"""
Explanation: Demonstration of Use Case
Users can enter step by step explanations of changes made to a SciSheet in a Jupyter notebook
Load necessary packages
End of explanation
"""
import pandas_datareader as pdr
ibm_data = pdr.get_data_yahoo('IBM')
income_data = pd.read_csv('income_data.csv', sep=';')
income_data
"""
Explanation: Load data into the notebook
End of explanation
"""
tbl2 = scisheets_widget.HandsonDataFrame(income_data)
tbl2.show()
tbl2._df
tbl2._widget._model_data
tbl2._widget._model_header
"""
Explanation: Display the loaded data as a scisheet widget
End of explanation
"""
def bob(df):
df_bob = df
df_bob['help'] = df_bob['State']
return df_bob
bob(income_data)
bob2 = income_data
income_data.update(bob2)
income_data2 = income_data.copy()
income_data2.loc[1, '2006'] = 'Tewnty'
income_data2
income_data
income_data.update(income_data2)
income_data
"""
Explanation: Sanity check to explore df functionality
End of explanation
"""
|
keras-team/keras-io | examples/vision/ipynb/knowledge_distillation.ipynb | apache-2.0 | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
"""
Explanation: Knowledge Distillation
Author: Kenneth Borup<br>
Date created: 2020/09/01<br>
Last modified: 2020/09/01<br>
Description: Implementation of classical Knowledge Distillation.
Introduction to Knowledge Distillation
Knowledge Distillation is a procedure for model
compression, in which a small (student) model is trained to match a large pre-trained
(teacher) model. Knowledge is transferred from the teacher model to the student
by minimizing a loss function, aimed at matching softened teacher logits as well as
ground-truth labels.
The logits are softened by applying a "temperature" scaling function in the softmax,
effectively smoothing out the probability distribution and revealing
inter-class relationships learned by the teacher.
Reference:
Hinton et al. (2015)
Setup
End of explanation
"""
class Distiller(keras.Model):
def __init__(self, student, teacher):
super(Distiller, self).__init__()
self.teacher = teacher
self.student = student
def compile(
self,
optimizer,
metrics,
student_loss_fn,
distillation_loss_fn,
alpha=0.1,
temperature=3,
):
""" Configure the distiller.
Args:
optimizer: Keras optimizer for the student weights
metrics: Keras metrics for evaluation
student_loss_fn: Loss function of difference between student
predictions and ground-truth
distillation_loss_fn: Loss function of difference between soft
student predictions and soft teacher predictions
alpha: weight to student_loss_fn and 1-alpha to distillation_loss_fn
temperature: Temperature for softening probability distributions.
Larger temperature gives softer distributions.
"""
super(Distiller, self).compile(optimizer=optimizer, metrics=metrics)
self.student_loss_fn = student_loss_fn
self.distillation_loss_fn = distillation_loss_fn
self.alpha = alpha
self.temperature = temperature
def train_step(self, data):
# Unpack data
x, y = data
# Forward pass of teacher
teacher_predictions = self.teacher(x, training=False)
with tf.GradientTape() as tape:
# Forward pass of student
student_predictions = self.student(x, training=True)
# Compute losses
student_loss = self.student_loss_fn(y, student_predictions)
distillation_loss = self.distillation_loss_fn(
tf.nn.softmax(teacher_predictions / self.temperature, axis=1),
tf.nn.softmax(student_predictions / self.temperature, axis=1),
)
loss = self.alpha * student_loss + (1 - self.alpha) * distillation_loss
# Compute gradients
trainable_vars = self.student.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update the metrics configured in `compile()`.
self.compiled_metrics.update_state(y, student_predictions)
# Return a dict of performance
results = {m.name: m.result() for m in self.metrics}
results.update(
{"student_loss": student_loss, "distillation_loss": distillation_loss}
)
return results
def test_step(self, data):
# Unpack the data
x, y = data
# Compute predictions
y_prediction = self.student(x, training=False)
# Calculate the loss
student_loss = self.student_loss_fn(y, y_prediction)
# Update the metrics.
self.compiled_metrics.update_state(y, y_prediction)
# Return a dict of performance
results = {m.name: m.result() for m in self.metrics}
results.update({"student_loss": student_loss})
return results
"""
Explanation: Construct Distiller() class
The custom Distiller() class overrides the Model methods train_step, test_step,
and compile(). In order to use the distiller, we need:
A trained teacher model
A student model to train
A student loss function on the difference between student predictions and ground-truth
A distillation loss function, along with a temperature, on the difference between the
soft student predictions and the soft teacher labels
An alpha factor to weight the student and distillation loss
An optimizer for the student and (optional) metrics to evaluate performance
In the train_step method, we perform a forward pass of both the teacher and student,
calculate the loss with weighting of the student_loss and distillation_loss by alpha and
1 - alpha, respectively, and perform the backward pass. Note: only the student weights are updated,
and therefore we only calculate the gradients for the student weights.
In the test_step method, we evaluate the student model on the provided dataset.
End of explanation
"""
# Create the teacher
teacher = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(256, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding="same"),
layers.Conv2D(512, (3, 3), strides=(2, 2), padding="same"),
layers.Flatten(),
layers.Dense(10),
],
name="teacher",
)
# Create the student
student = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(16, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding="same"),
layers.Conv2D(32, (3, 3), strides=(2, 2), padding="same"),
layers.Flatten(),
layers.Dense(10),
],
name="student",
)
# Clone student for later comparison
student_scratch = keras.models.clone_model(student)
"""
Explanation: Create student and teacher models
Initially, we create a teacher model and a smaller student model. Both models are
convolutional neural networks created using Sequential(),
but they could be any Keras model.
End of explanation
"""
# Prepare the train and test dataset.
batch_size = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Normalize data
x_train = x_train.astype("float32") / 255.0
x_train = np.reshape(x_train, (-1, 28, 28, 1))
x_test = x_test.astype("float32") / 255.0
x_test = np.reshape(x_test, (-1, 28, 28, 1))
"""
Explanation: Prepare the dataset
The dataset used for training the teacher and distilling the teacher is
MNIST, and the procedure would be equivalent for any other
dataset, e.g. CIFAR-10, with a suitable choice
of models. Both the student and teacher are trained on the training set and evaluated on
the test set.
End of explanation
"""
# Train teacher as usual
teacher.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train and evaluate teacher on data.
teacher.fit(x_train, y_train, epochs=5)
teacher.evaluate(x_test, y_test)
"""
Explanation: Train the teacher
In knowledge distillation we assume that the teacher is trained and fixed. Thus, we start
by training the teacher model on the training set in the usual way.
End of explanation
"""
# Initialize and compile distiller
distiller = Distiller(student=student, teacher=teacher)
distiller.compile(
optimizer=keras.optimizers.Adam(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
student_loss_fn=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
distillation_loss_fn=keras.losses.KLDivergence(),
alpha=0.1,
temperature=10,
)
# Distill teacher to student
distiller.fit(x_train, y_train, epochs=3)
# Evaluate student on test dataset
distiller.evaluate(x_test, y_test)
"""
Explanation: Distill teacher to student
We have already trained the teacher model, and we only need to initialize a
Distiller(student, teacher) instance, compile() it with the desired losses,
hyperparameters and optimizer, and distill the teacher to the student.
End of explanation
"""
# Train the student as usual
student_scratch.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train and evaluate student trained from scratch.
student_scratch.fit(x_train, y_train, epochs=3)
student_scratch.evaluate(x_test, y_test)
"""
Explanation: Train student from scratch for comparison
We can also train an equivalent student model from scratch without the teacher, in order
to evaluate the performance gain obtained by knowledge distillation.
End of explanation
"""
|
mssalvador/WorkflowCleaning | notebooks/SemiSupervised Demo.ipynb | apache-2.0 | %run initilization.py
path = '/home/svanhmic/workspace/DABAI/Workflows/dist_workflow/'
packages = [path+'semisupervised.zip', path+'shared.zip', path+'cleaning.zip',
path+'examples.zip', path+'classification.zip']
for p in packages:
sc.addPyFile(p)
"""
Explanation: A tour through Semisupervised learning
This notebook is intended to be a short but thorough introduction to semisupervised learning. We'll cover some basics, including the different methods, a first attempt at semisupervised learning in Apache Spark and, hopefully, a first working version of Label Propagation. A bare-bones NumPy sketch of the generic label-propagation iteration is included right after this introduction for orientation.
End of explanation
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
n = 300
x = np.random.uniform(0,2*3.14, n) # np.linspace(0,2*3.14,n)
a = 5.
b = 1
fig = plt.figure(figsize=(15, 15))
ax = fig.gca(projection='3d')
double_helix = []
for color, shape, par, number in [('r', 'o', a, 0), ('b','x', -a, 1)]:
helix = np.array(list(map(lambda x: (par*np.sin(x),par*np.cos(x),b*x, number),x)))
ax.scatter(xs=helix[:,0], ys= helix[:,1], zs=helix[:,2], c=color, marker=shape)
double_helix.append(helix)
plt.show()
import functools
missing_label_func = lambda x: np.random.permutation([x]+(n-1)*[np.NaN])
missing_label = np.reshape(np.hstack((missing_label_func(0.),missing_label_func(1.))),(2*n,1))
spring = np.hstack((np.vstack(double_helix),missing_label))
spring_pdf = pd.DataFrame(spring,columns='x y z org_label unknown_label'.split())
spring_pdf = spring_pdf.reset_index()
spring_pdf['id'] = spring_pdf['index']
del spring_pdf['index']
spring_pdf.head()
spring_df = spark.createDataFrame(spring_pdf)
from semisupervised.LabelPropagation import label_propagation
from shared.Extension_to_timeit import pretty_time_result
from pyspark.sql import functions as F
lp_algorithm = functools.partial(
label_propagation, sc=sc, label_col='unknown_label', eval_type='max',
id_col='id',feature_cols=['x','y','z'], k=2, sigma=.4, max_iters=20)
all_transitions_df = []
model_timer = %timeit -r1 -o all_transitions_df.append(lp_algorithm(data_frame=spring_df))
print(pretty_time_result(model_timer))
transition_df = all_transitions_df[0]
transition_df['row','label','initial_label'].show(600,truncate=False)
merged_df = spring_df.alias('a').join(transition_df.alias('b'),F.col('row')==F.col('id'))
merged_df.groupBy(F.col('label'),F.col('org_label')).count().show()
fig = plt.figure(figsize=(15, 15))
ax = fig.gca(projection='3d')
result_spring = merged_df.toPandas()
for i, color in zip(range(2),('r','b')):
ax.scatter(xs=result_spring[result_spring.label == i]['x'],
ys= result_spring[result_spring.label == i]['y'],
zs=result_spring[result_spring.label == i]['z'],
c=color,
marker=shape)
double_helix.append(helix)
plt.show()
"""
Explanation: Spring (2x helix) data
End of explanation
"""
spring_df.show()
from semisupervised.LP_Graph import create_complete_graph
from pyspark.sql import types as T
from pyspark.ml import feature as ml_feature
from pyspark.ml import Pipeline
from pyspark.mllib import linalg as mllib_linalg
from pyspark.ml import linalg as ml_linalg
from shared import ConvertAllToVecToMl
from py4j import protocol
from pyspark.mllib.linalg import distributed
matrix = create_complete_graph(sc, data_frame=spring_df, id_column='id', feature_columns=['x','y','z'], label_column='unknown_label', sigma=0.4)
irmatrix = matrix.toBlockMatrix()
mat = mllib_linalg.Matrices.dense(1,spring_df.count(),np.ones(spring_df.count()))
blocks1 = sc.parallelize([((0, 0), mat)])
blk_mat = distributed.BlockMatrix(blocks1,1024,1024)
summed = blk_mat.multiply(irmatrix)
summed.toLocalMatrix()
rrmat= matrix.toIndexedRowMatrix()
# The lambda body was left empty in the original; as a sketch (assumption), sum the
# weights of each indexed row:
rrmat.rows.map(lambda x: (x.index, x.vector.toArray().sum())).take(5)
def _compute_weights(vec_x, vec_y, sigma):
#if isinstance(vec_y, ml_linalg.SparseVector) | isinstance(vec_x, ml_linalg.SparseVector):
return np.exp(-vec_x.squared_distance(vec_y)/sigma**2)
def gen_graph(sc, data_frame, cols = None ):
list_of_vars = [T.StructField('a_id', T.LongType()),
T.StructField('a_lab', T.DoubleType()),
T.StructField('b_id', T.LongType()),
T.StructField('b_lab', T.DoubleType()),
T.StructField('weights', T.DoubleType())
]
schema = T.StructType(list_of_vars)
vectors = ml_feature.VectorAssembler(inputCols=cols,outputCol='features')
converter = ConvertAllToVecToMl.ConvertAllToVecToMl(inputCol='features',outputCol='converteds')
scaler = ml_feature.StandardScaler(withMean=True, withStd=True, inputCol='converteds', outputCol='std_features')
pipeline = Pipeline(stages=[vectors, converter, scaler])
model = pipeline.fit(data_frame)
standard_X = model.transform(data_frame)
to_sparse_udf = F.udf(lambda x: ml_linalg.SparseVector(len(x), [(i,j) for i,j in enumerate(x) if j != 0]), ml_linalg.VectorUDT())
standard_X_sparse = standard_X.withColumn('weights', to_sparse_udf(F.col('std_features')))
bc_vec = sc.broadcast(standard_X_sparse.select('id','weights').rdd.collectAsMap())
#print(broadcasted_vectors.value.get(3))
cartesian_rows = pyspark.Row('a_id','a_lab','b_id','b_lab','weights')
rdd_srink = standard_X_sparse.rdd.map(lambda x: (x['id'], x['unknown_label']))
rdd_cartesian = (rdd_srink
.cartesian(rdd_srink)
.map(lambda x: (*x[0],*x[1]))
.map(lambda x: distributed.MatrixEntry(
x[0],x[2], _compute_weights(bc_vec.value.get(x[0]),
bc_vec.value.get(x[2]),
0.42)
))
)
return distributed.CoordinateMatrix(rdd_cartesian)
output_df = gen_graph(sc, spring_df, 'x y z'.split())
try:
print(output_df.entries.take(5))
except protocol.Py4JJavaError as e:
print(e)
output_df = gen_graph(sc, spring_df, 'x y z'.split())
%timeit -r1 -o output_df.entries.collect()
print(output_df.entries.getNumPartitions())
"""
Explanation: Performance testing Label Propagation
Identifying potential bottlenecks
First: Graph Generation
Second: Broadcasting summed data
End of explanation
"""
mnist_train_data = pd.DataFrame.from_csv(
'/home/svanhmic/workspace/data/DABAI/mnist/train.csv',
header=0,index_col=None)
mnist_train_data['label'].count()
from PIL import Image
import math
from matplotlib import pyplot as plt
from pyspark.sql import functions as F
from pyspark.ml.linalg import DenseVector, VectorUDT
pic_1 = mnist_train_data.iloc[2,1:].values.reshape((28,28))
plt.imshow(pic_1, interpolation='nearest',cmap='gray')
plt.show()
"""
Explanation: MNIST dataset
End of explanation
"""
to_vec = F.udf(lambda vec: DenseVector(np.array(vec)), VectorUDT())
mnist_train_df = spark.read.csv(
'/home/svanhmic/workspace/data/DABAI/mnist/train.csv',
inferSchema=True, header=True)
mnist_train_df = mnist_train_df.withColumn('id',F.monotonically_increasing_id())
# mnist_train_df = mnist_train_df.select('label',to_vec(F.array(mnist_train_df.columns[1:])).alias('pixels'))
mnist_train_df.printSchema()
mnist_1_0_train_df = mnist_train_df.filter(F.col('label').isin(0,1))
mnist_1_0_train_df.count()
sampled_no_nans = (mnist_1_0_train_df
.sampleBy('label', fractions={0: 0.03, 1: 0.03})
.withColumn('unknown_label', F.col('label'))
.select('id','unknown_label'))
#Show that it actually has sampled the right elements
sampled_no_nans['id','unknown_label'].show()
sampled_nans = (mnist_1_0_train_df.select('id').cache()
.subtract(sampled_no_nans.select('id'))
.withColumn('unknown_label',F.lit(np.NaN))
)
sampled_nans.show()
"""
Explanation: Spark download
End of explanation
"""
bd_nans = sc.broadcast(sampled_nans.unionAll(sampled_no_nans).rdd.collectAsMap())
add_nan_udf = F.udf(lambda x: bd_nans.value.get(x),T.FloatType())
merged = mnist_train_df.withColumn('unknown_label',add_nan_udf(F.col('id')))
#merged_mnist_1_0_train_df.describe().show()
output = gen_graph(sc, merged, cols=['pixel'+str(i) for i in range(784)])
output.entries.take(5)
matrix = output.entries.map(lambda e: distributed.MatrixEntry(e.i, e.j, e.value))  # gen_graph returns a CoordinateMatrix, so work on its entries
matrix.take(5)
from sklearn.semi_supervised import LabelPropagation
input_df.show()
pdf_mnist = input_df.toPandas()
data = pdf_mnist.loc[:,['pixel'+str(i) for i in range(784)]]
labels = pdf_mnist.loc[:,'unknown_label'].fillna(-1)
data.describe()
lpa = LabelPropagation(gamma=5.4003)
result = lpa.fit(data.as_matrix(),labels)
prediction = result.predict(data.as_matrix())
pdf_mnist['prediction'] = prediction
pdf_mnist.groupby(['label','prediction'],as_index=False)['id'].count()
from pyspark.ml.clustering import KMeans
vectorizer = ml_feature.VectorAssembler(inputCols=['pixel'+str(i) for i in range(784)],outputCol='features')
conv = ConvertAllToVecToMl.ConvertAllToVecToMl(inputCol='features',outputCol='conved')
scaler = ml_feature.StandardScaler(withMean=True,withStd=True,inputCol='conved', outputCol='scaled')
km = KMeans(featuresCol='scaled')
pipeline = Pipeline(stages=[vectorizer,conv,scaler,km])
model = pipeline.fit(input_df)
model.transform(input_df)['prediction','id'].show()
"""
Explanation: Merging the two data sets
Let's run the algorithm:
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/bcc/cmip6/models/sandbox-1/landice.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'bcc', 'sandbox-1', 'landice')
"""
Explanation: ES-DOC CMIP6 Model Properties - Landice
MIP Era: CMIP6
Institute: BCC
Source ID: SANDBOX-1
Topic: Landice
Sub-Topics: Glaciers, Ice.
Properties: 30 (21 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:39
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Grid
4. Glaciers
5. Ice
6. Ice --> Mass Balance
7. Ice --> Mass Balance --> Basal
8. Ice --> Mass Balance --> Frontal
9. Ice --> Dynamics
1. Key Properties
Land ice key properties
1.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the land ice model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of the land ice model code
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Ice Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify how ice albedo is modelled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Atmospheric Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the atmosphere and ice (e.g. orography, ice mass)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Oceanic Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the ocean and ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which variables are prognostically calculated in the ice model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of land ice code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Grid
Land ice grid
3.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.2. Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is an adaptive grid being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Base Resolution
Is Required: TRUE Type: FLOAT Cardinality: 1.1
The base resolution (in metres), before any adaption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Resolution Limit
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If an adaptive grid is being used, what is the limit of the resolution (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.5. Projection
Is Required: TRUE Type: STRING Cardinality: 1.1
The projection of the land ice grid (e.g. albers_equal_area)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Glaciers
Land ice glaciers
4.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of glaciers in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of glaciers, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 4.3. Dynamic Areal Extent
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does the model include a dynamic glacial extent?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Ice
Ice sheet and ice shelf
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the ice sheet and ice shelf in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 5.2. Grounding Line Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the technique used for modelling the grounding line in the ice sheet-ice shelf coupling
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.3. Ice Sheet
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice sheets simulated?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.4. Ice Shelf
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice shelves simulated?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Ice --> Mass Balance
Description of the surface mass balance treatment
6.1. Surface Mass Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how and where the surface mass balance (SMB) is calculated. Include the temporal coupling frequency from the atmosphere, whether or not a separate SMB model is used, and if so details of this model, such as its resolution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Ice --> Mass Balance --> Basal
Description of basal melting
7.1. Bedrock
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over bedrock
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Ocean
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Ice --> Mass Balance --> Frontal
Description of calving/melting from the ice shelf front
8.1. Calving
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of calving from the front of the ice shelf
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Melting
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of melting from the front of the ice shelf
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Ice --> Dynamics
**
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of ice sheet and ice shelf dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.2. Approximation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Approximation type used in modelling ice dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.3. Adaptive Timestep
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there an adaptive time scheme for the ice scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.4. Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep (in seconds) of the ice scheme. If the timestep is adaptive, then state a representative timestep.
End of explanation
"""
|
dimonaks/siman | tutorials/calc_barriers_subroutine.ipynb | gpl-2.0 | import sys
sys.path.extend(['/home/aksenov/Simulation_wrapper/siman'])
import header
from calc_manage import add, res
from database import write_database, read_database
from set_functions import read_vasp_sets
from calc_manage import smart_structure_read
from SSHTools import SSHTools
from project_funcs import calc_barriers
%matplotlib inline
"""
Explanation: Instruction
This tutorial explains how to use the calc_barriers wrapper.
calc_barriers is a high-level wrapper used for the calculation of migration barriers.
The calculations are performed by executing the same command several times.
First run
Calculation of equilibrium lattice constants
Second run
Construction of a supercell based on the optimized unit cell, with additional relaxation of atomic positions
Third run
Calculation of the migration barrier using the obtained supercell
params - a special dictionary of extra options
'jmol' - {} - save the migration path as png images
Import libraries
End of explanation
"""
header.ssh_object = SSHTools()
header.ssh_object.setup(user="aksenov",host="10.30.16.62",pkey="/home/aksenov/.ssh/id_rsa")
header.PATH2PROJECT = 'barriers' # path to project relative to your home folder on cluster
header.PATH2POTENTIALS = '/home/aksenov/scientific_projects/PAW_PBE_VASP' #path to VASP POTENTIALS
header.PATH2NEBMAKE = '~/Simulation_wrapper/vts/nebmake.pl' # add path to nebmake in your project_conf.py
read_database()
header.varset['static'].potdir = {29:'Cu_new', 3:'Li'} #subfolders with required potentials
read_vasp_sets([
('ion', 'static', {'ISIF':2, 'IBRION':1, 'NSW':20, 'EDIFFG':-0.025}, ), # relax only ions
('cell', 'static', {'ISIF':4, 'IBRION':1, 'NSW':20, 'EDIFFG':-0.025},)]) #relax everything except volume
"""
Explanation: Set configuration parameters
End of explanation
"""
add('Cu2', 'static', 1, input_geo_file = 'Cu/Cu2fcc.geo', it_folder = 'Cu')
"""
Explanation: Starting calculation
Choose the starting calculation.
For example, a 2-atom cell of fcc Cu:
End of explanation
"""
pd = {
'id':('Cu2', 'static', 1), # starting calculation
'el':'Li', # Element to move
'itfolder':'Cu/', # Workding directory
'main_set':'ion', # This set is used for supercell calculation
'scaling_set':'ion', # This set is used for determining lattice parameters
'neb_set':'ion', # This set is used for calculation of migration barrier
'scale_region':(-4, 4), # range of unit cell uniform deformation in %
'ortho':[7,7,7], # Target sizes of supercell in A
'r_impurity':1.2, # radius of searchable void
'images':5, # number of images in NEB calculation
'start_pos':0, # starting position for NEB; offered by the wrapper
'end_pos':1, # final position for NEB; offered by the wrapper
'readfiles':1, # read OUTCAR files
}
"""
Explanation: Configuration dictionary
The configuration dictionary should be created
End of explanation
"""
calc_barriers('normal', 'Li', 'Li', show_fit = 0, up = 1, upA = 0, upC = 0, param_dic = pd, add_loop_dic = {'run':1})
"""
Explanation: 1. Unit cell optimization
The first argument should be 'normal'
The second and third arguments are the moving element
up - update the unit cell optimization
upA - update the supercell calculation
upC - update the NEB calculation
End of explanation
"""
calc_barriers('normal', 'Li', 'Li', show_fit = 1, up = 0, upA = 0, upC = 0, param_dic = pd, add_loop_dic = {'run':1})
"""
Explanation: 2. Supercell construction
After the optimization is finished, run the same command once again, it will show the fit and construct the supercell.
End of explanation
"""
pd['el'] = 'Cu' # Cu atom is chosen for moving
pd['i_atom_to_move'] = 1 # number of atom to move
pd['rep_moving_atom'] = 'Li' # replace moving atom with Li
"""
Explanation: 3. Read supercell and start NEB calculation using the same calculation
This step uses the add_neb subroutine from neb.py
To choose different paths, change the
pd['start_pos'] and
pd['end_pos'] values
The command suggests possible values of the initial and final positions, see below.
If you want to study the migration of a substitution atom, then
use the additional arguments:
End of explanation
"""
calc_barriers('normal', 'Cu', 'Cu', show_fit = 0, up = 0, upA = 0, upC = 0, param_dic = pd, add_loop_dic = {'run':0})
# after running this command, go to ./xyz/Cu2.su.s7v100.n5Cu2Cu2v1rLi_all and check the created path
"""
Explanation: 3.1 Migration of substitution atom
End of explanation
"""
pd['i_atom_to_move'] = None
pd['rep_moving_atom'] = None
pd['el'] = 'Li'
calc_barriers('normal', 'Li', 'Li', show_fit = 0, up = 0, upA = 0, upC = 1, param_dic = pd, add_loop_dic = {'run':0})
#after the command is finished please check Cu2.su.s7v100.n5i0e1Li_all folder with POSCARs
"""
Explanation: 3.2 Migration of interstitial atom
Attention! This mode relies on C++ routine siman/findpores.cpp; It should be compiled with siman/make_findpores first
End of explanation
"""
#Here we use the additional parameter *end_pos_types_z*; it allows using Cu as final positions for Li migration
from neb import add_neb
st = smart_structure_read('Cu/POSCAR_Cu310A2Liis2_1lo_2_end')
add_neb(st = st, it_new = 'Cu310A2_212Li', ise_new = 'ion', it_folder = 'Cu/neb',
images = 5, i_atom_to_move = 215, i_void_final = 6, end_pos_types_z = [29])
#Check created path in xyz/Cu310A2_212Li.n5Li216Li216v6_all
write_database()
"""
Explanation: 3.3 Using starting cell with Li
Assuming you already have a supercell with Li, its migration barrier can be calculated as follows
using add_neb:
End of explanation
"""
|
sanjayankur31/nest-simulator | doc/userdoc/model_details/noise_generator.ipynb | gpl-2.0 | import sympy
sympy.init_printing()
x = sympy.Symbol('x')
sympy.series((1-sympy.exp(-x))/(1+sympy.exp(-x)), x)
"""
Explanation: The NEST noise_generator
Hans Ekkehard Plesser, 2015-06-25
This notebook describes how the NEST noise_generator model works and what effect it has on model neurons.
NEST needs to be in your PYTHONPATH to run this notebook.
Basics
The noise_generator emits
a piecewise constant current
that changes at fixed intervals $\delta$.
For each interval, a new amplitude is chosen from the normal distribution.
Each target neuron receives a different realization of the current.
To be precise, the output current of the generator is given by
$$I(t) = \mu + \sigma N_j \qquad\text{with $j$ such that}\quad j\delta < t \leq (j+1)\delta$$
where $N_j$ is the value drawn from the zero-mean unit-variance normal distribution for interval $j$ containing $t$.
When using the generator with modulated variance, the noise current is given by
$$I(t) = \mu + \sqrt{\sigma^2 + \sigma_m^2\sin(2\pi f j\delta + \frac{2\pi}{360}\phi_d)} N_j \;.$$
Mathematical symbols match model parameters as follows
|Symbol|Parameter|Unit|Default|Description|
|------|:--------|:---|------:|:----------|
|$\mu$|mean|pA|0 pA|mean of the noise current amplitude|
|$\sigma$|std|pA|0 pA|standard deviation of the noise current amplitude|
|$\sigma_m$|std_mod|pA|0 pA|modulation depth of the std. deviation of the noise current amplitude|
|$\delta$|dt|ms|1 ms|interval between current amplitude changes|
|$f$|frequency|Hz|0 Hz| frequency of variance modulation|
|$\phi_d$|phase|[deg]|0$^{\circ}$| phase of variance modulation|
For the remainder of this document, we will only consider the current at time points $t_j=j\delta$ and define
$$I_j = I(t_j+) = \mu + \sigma N_j $$
and correspondingly for the case of modulated noise. Note that $I_j$ is thus the current emitted during $(t_j, t_{j+1}]$, following NEST's use of left-open, right-closed intervals. We also set $\omega=2\pi f$ and $\phi=\frac{2\pi}{360}\phi_d$ for brevity.
Properties of the noise current
The noise current is a piecewise constant current. Thus, it is only an approximation to white noise and the properties of the noise will depend on the update interval $\delta$. The default update interval is $\delta = 1$ms. We chose this value so that the default would be independent from the time step $h$ of the simulation, assuming that time steps larger than 1 ms are rarely used. It also is plausible to assume that most time steps chosen will divide 1 ms evenly, so that changes in current amplitude will coincide with time steps. If this is not the case, the subsequent analysis does not apply exactly.
The currents to all targets of a noise generator have different amplitudes, but always change simultaneously at times $j\delta$.
Across an ensemble of targets or realizations, we have
\begin{align}
\langle I_j\rangle &= \mu \
\langle \Delta I_j^2\rangle &= \sigma^2 \qquad \text{without modulation} \
\langle \Delta I_j^2\rangle &= \sigma^2 + \sigma_m^2\sin( \omega j\delta + \phi) \qquad \text{with modulation.}
\end{align}
Without modulation, the autocorrelation of the noise is given by
$$\langle (I_j-\mu) (I_k-\mu)\rangle = \sigma^2\delta_{jk}$$
where $\delta_{jk}$ is Kronecker's delta.
With modulation, the autocorrlation is
$$\langle (I_j-\mu) (I_k-\mu)\rangle = \sigma_j^2\delta_{jk}\qquad\text{where}\; \sigma_j = \sqrt{\sigma^2 + \sigma_m^2\sin( j\delta\omega + \phi_d)}\;.$$
Note that it is currently not possible to record this noise current directly in NEST, since a multimeter cannot record from a noise_generator.
Noise generators effect on a neuron
Precisely how a current injected into a neuron will affect that neuron, will obviously depend on the neuron itself. We consider here the subthreshold dynamics most widely used in NEST, namely the leaky integrator. The analysis that follows is applicable directly to all iaf_psc_* models. It applies to conductance based neurons such as the iaf_cond_* models only as long as no synaptic input is present, which changes the membrane conductances.
Membrane potential dynamics
We focus here only on subthreshold dynamics, i.e., we assume that the firing threshold of the neuron is $V_{\text{th}}=\infty$. We also ignore all synaptic input, which is valid for linear models, and set the resting potential $E_L=0$ mV for convenience. The membrane potential $V$ is then governed by
$$\dot{V} = - \frac{V}{\tau} + \frac{I}{C}$$
where $\tau$ is the membrane time constant and $C$ the capacitance. We further assume $V(0)=0$ mV. We now focus on the membrane potential at times $t_j=j\delta$. Let $V_j=V(j\delta)$ be the membrane potential at time $t_j$. Then, a constant currant $I_j$ will be applied to the neuron until $t_{j+1}=t_j+\delta$, at which time the membrane potential will be
$$V_{j+1} = V_j e^{-\delta/\tau} + \left(1-e^{-\delta/\tau}\right)\frac{I_j\tau}{C} \;.$$
We can apply this backward in time towards $V_0=0$
\begin{align}
V_{j+1} &= V_j e^{-\delta/\tau} + \left(1-e^{-\delta/\tau}\right)\frac{I_j\tau}{C} \
&= \left[V_{j-1} e^{-\delta/\tau} + \left(1-e^{-\delta/\tau}\right)\frac{I_{j-1}\tau}{C}\right]
e^{-\delta/\tau} + \left(1-e^{-\delta/\tau}\right)\frac{I_j\tau}{C} \
&= \left(1-e^{-\delta/\tau}\right)\frac{\tau}{C}\sum_{k=0}^{j} I_k e^{-(j-k)\delta/\tau} \
&= \left(1-e^{-\delta/\tau}\right)\frac{\tau}{C}\sum_{k=0}^{j} I_{k} e^{-k\delta/\tau} \;.
\end{align}
In the last step, we exploited the mutual independence of the random current amplitudes $I_k$, which allows us to renumber them arbitrarily.
Mean and variance of the membrane potential
The mean of the membrane potential at $t_{j+1}$ is thus
\begin{align}
\langle V_{j+1}\rangle &= \left(1-e^{-\delta/\tau}\right)\frac{\tau}{C}\sum_{k=0}^{j} \langle I_{k} \rangle e^{-k\delta/\tau}\
&= \frac{\mu\tau}{C}\left(1-e^{-\delta/\tau}\right)\sum_{k=0}^{j} e^{-k\delta/\tau}\
&= \frac{\mu\tau}{C}\left(1-e^{-(j+1)\delta/\tau}\right)\
&= \frac{\mu\tau}{C}\left(1-e^{-t_{j+1}/\tau}\right)
\end{align}
as expected; note that we used the geometric sum formula in the second step.
To obtain the variance of the membrane potential at $t_{j+1}$, we first compute the second moment
$$\langle V_{j+1}^2 \rangle = \frac{\tau^2}{C^2}\left(1-e^{-\delta/\tau}\right)^2 \left\langle\left(\sum_{k=0}^{j} I_{k} e^{-k\delta/\tau}\right)^2\right\rangle$$
Substituting $q = e^{-\delta/\tau}$ and $\alpha = \frac{\tau^2}{C^2}\left(1-e^{-\delta/\tau}\right)^2 = \frac{\tau^2}{C^2}\left(1-q\right)^2$, we have
\begin{align}
\langle V_{j+1}^2 \rangle &= \alpha \left\langle\left(\sum_{k=0}^{j} I_{k} q^k\right)^2\right\rangle \
&= \alpha \sum_{k=0}^{j} \sum_{m=0}^{j} \langle I_k I_m \rangle q^{k+m} \
&= \alpha \sum_{k=0}^{j} \sum_{m=0}^{j} (\mu^2 + \sigma_k^2 \delta_{km}) q^{k+m} \
&= \alpha \mu^2 \left(\sum_{k=0}^j q^k\right)^2 + \alpha \sum_{k=0}^{j} \sigma_k^2 q^{2k} \
&= \langle V_{j+1}\rangle^2 + \alpha \sum_{k=0}^{j} \sigma_k^2 q^{2k} \;.
\end{align}
Evaluating the remaining sum for the modulate case will be tedious, so we focus for now on the unmodulated case, i.e., $\sigma\equiv\sigma_k$, so that we again are left with a geometric sum, this time over $q^2$. We can now subtract the square of the mean to obtain the variance
\begin{align}
\langle (\Delta V_{j+1})^2 \rangle &= \langle V_{j+1}^2 \rangle - \langle V_{j+1}\rangle^2 \
&= \alpha \sigma^2 \frac{q^{2(j+1)}-1}{q^2-1} \
&= \frac{\sigma^2\tau^2}{C^2} (1-q)^2 \frac{q^{2(j+1)}-1}{q^2-1} \
&= \frac{\sigma^2\tau^2}{C^2} \frac{1-q}{1+q}\left(1-q^{2(j+1)}\right) \
&= \frac{\sigma^2\tau^2}{C^2} \frac{1-e^{-\delta/\tau}}{1+e^{-\delta/\tau}}\left(1-e^{-2t_{j+1}/\tau}\right) \;.
\end{align}
In the last step, we used that $1-q^2=(1-q)(1+q)$.
The last term in this expression describes the approach of the variance of the membrane potential to its steady-state value. The fraction in front of it describes the effect of switching current amplitudes at intervals $\delta$ instead of instantenously as in real white noise.
We now have in the long-term limit
$$\langle (\Delta V)^2 \rangle = \lim_{j\to\infty} \langle (\Delta V_{j+1})^2 \rangle
= \frac{\sigma^2\tau^2}{C^2} \frac{1-e^{-\delta/\tau}}{1+e^{-\delta/\tau}} \;. $$
We expand the fraction:
End of explanation
"""
import math
import numpy as np
import scipy
import matplotlib.pyplot as plt
%matplotlib inline
def noise_params(V_mean, V_std, dt=1.0, tau_m=10., C_m=250.):
'Returns mean and std for noise generator for parameters provided; defaults for iaf_psc_alpha.'
return C_m / tau_m * V_mean, math.sqrt(2/(tau_m*dt))*C_m*V_std
def V_asymptotic(mu, sigma, dt=1.0, tau_m=10., C_m=250.):
'Returns asymptotic mean and std of V_m'
V_mean = mu * tau_m / C_m
V_std = (sigma * tau_m / C_m) * np.sqrt(( 1 - math.exp(-dt/tau_m) ) / ( 1 + math.exp(-dt/tau_m) ))
return V_mean, V_std
def V_mean(t, mu, tau_m=10., C_m=250.):
'Returns predicted voltage for given times and parameters.'
vm, _ = V_asymptotic(mu, sigma, tau_m=tau_m, C_m=C_m)
return vm * ( 1 - np.exp( - t / tau_m ) )
def V_std(t, sigma, dt=1.0, tau_m=10., C_m=250.):
'Returns predicted variance for given times and parameters.'
_, vms = V_asymptotic(mu, sigma, dt=dt, tau_m=tau_m, C_m=C_m)
return vms * np.sqrt(1 - np.exp(-2*t/tau_m))
import nest
def simulate(mu, sigma, dt=1.0, tau_m=10., C_m=250., N=1000, t_max=50.):
'''
Simulate an ensemble of N iaf_psc_alpha neurons driven by noise_generator.
Returns
- voltage matrix, one column per neuron
- time axis indexing matrix rows
- time shift due to delay, time at which first current arrives
'''
resolution = 0.1
delay = 1.0
nest.ResetKernel()
nest.resolution = resolution
ng = nest.Create('noise_generator', params={'mean': mu, 'std': sigma, 'dt': dt})
vm = nest.Create('voltmeter', params={'interval': resolution})
nrns = nest.Create('iaf_psc_alpha', N, params={'E_L': 0., 'V_m': 0., 'V_th': 1e6,
'tau_m': tau_m, 'C_m': C_m})
nest.Connect(ng, nrns, syn_spec={'delay': delay})
nest.Connect(vm, nrns)
nest.Simulate(t_max)
# convert data into time axis vector and matrix with one column per neuron
t, s, v = vm.events['times'], vm.events['senders'], vm.events['V_m']
tix = np.array(np.round(( t - t.min() ) / resolution), dtype=int)
sx = np.unique(s)
assert len(sx) == N
six = s - s.min()
V = np.zeros((tix.max()+1, N))
for ix, vm in enumerate(v):
V[tix[ix], six[ix]] = vm
# time shift due to delay and onset after first step
t_shift = delay + resolution
return V, np.unique(t), t_shift
"""
Explanation: We thus have for $\delta \ll \tau$ and $t\gg\tau$
$$\langle (\Delta V)^2 \rangle
\approx \frac{\delta\tau \sigma^2 }{2 C^2} \;.$$
How to obtain a specific mean and variance of the potential
In order to obtain a specific mean membrane potential $\bar{V}$ with standard deviation $\Sigma$ for given neuron parameters $\tau$ and $C$ and fixed current-update interval $\delta$, we invert the expressions obtained above.
For the mean, we have for $t\to\infty$
$$\langle V\rangle = \frac{\mu\tau}{C} \qquad\Rightarrow\qquad \mu = \frac{C}{\tau} \bar{V}$$
and for the standard deviation
$$\langle (\Delta V)^2 \rangle \approx \frac{\delta\tau \sigma^2 }{2 C^2}
\qquad\Rightarrow\qquad \sigma = \sqrt{\frac{2}{\delta\tau}}C\Sigma \;.$$
Tests and examples
We will now test the expressions derived above against NEST. We first define some helper functions.
End of explanation
"""
dt = 1.0
mu, sigma = noise_params(0., 1., dt=dt)
print("mu = {:.2f}, sigma = {:.2f}".format(mu, sigma))
V, t, ts = simulate(mu, sigma, dt=dt)
V_mean_th = V_mean(t, mu)
V_std_th = V_std(t, sigma, dt=dt)
plt.plot(t, V.mean(axis=1), 'b-', label=r'$\bar{V_m}$')
plt.plot(t + ts, V_mean_th, 'b--', label=r'$\langle V_m \rangle$')
plt.plot(t, V.std(axis=1), 'r-', label=r'$\sqrt{\bar{\Delta V_m^2}}$')
plt.plot(t + ts, V_std_th, 'r--', label=r'$\sqrt{\langle (\Delta V_m)^2 \rangle}$')
plt.legend()
plt.xlabel('Time $t$ [ms]')
plt.ylabel('Membrane potential $V_m$ [mV]')
plt.xlim(0, 50);
"""
Explanation: A first test simulation
End of explanation
"""
dt = 1.0
mu, sigma = noise_params(2., 1., dt=dt)
print("mu = {:.2f}, sigma = {:.2f}".format(mu, sigma))
V, t, ts = simulate(mu, sigma, dt=dt)
V_mean_th = V_mean(t, mu)
V_std_th = V_std(t, sigma, dt=dt)
plt.plot(t, V.mean(axis=1), 'b-', label=r'$\bar{V_m}$')
plt.plot(t + ts, V_mean_th, 'b--', label=r'$\langle V_m \rangle$')
plt.plot(t, V.std(axis=1), 'r-', label=r'$\sqrt{\bar{\Delta V_m^2}}$')
plt.plot(t + ts, V_std_th, 'r--', label=r'$\sqrt{\langle (\Delta V_m)^2 \rangle}$')
plt.legend()
plt.xlabel('Time $t$ [ms]')
plt.ylabel('Membrane potential $V_m$ [mV]')
plt.xlim(0, 50);
"""
Explanation: Theory and simulation are in excellent agreement. The regular "drops" in the standard deviation are a consequence of the piecewise constant current and the synchronous switch in current for all neurons. This is discussed in more detail below.
A case with non-zero mean
We repeat the previous simulation, but now with non-zero mean current.
End of explanation
"""
dt = 0.1
mu, sigma = noise_params(0., 1., dt=dt)
print("mu = {:.2f}, sigma = {:.2f}".format(mu, sigma))
V, t, ts = simulate(mu, sigma, dt=dt)
V_mean_th = V_mean(t, mu)
V_std_th = V_std(t, sigma, dt=dt)
plt.plot(t, V.mean(axis=1), 'b-', label=r'$\bar{V_m}$')
plt.plot(t + ts, V_mean_th, 'b--', label=r'$\langle V_m \rangle$')
plt.plot(t, V.std(axis=1), 'r-', label=r'$\sqrt{\bar{\Delta V_m^2}}$')
plt.plot(t + ts, V_std_th, 'r--', label=r'$\sqrt{\langle (\Delta V_m)^2 \rangle}$')
plt.legend()
plt.xlabel('Time $t$ [ms]')
plt.ylabel('Membrane potential $V_m$ [mV]')
plt.xlim(0, 50);
"""
Explanation: We again observe excellent agreement between theory and simulation.
Shorter and longer switching intervals
We now repeat the previous simulation for zero mean with shorter ($\delta=0.1$ ms) and longer ($\delta=10$ ms) switching intervals.
End of explanation
"""
dt = 10.0
mu, sigma = noise_params(0., 1., dt=dt)
print("mu = {:.2f}, sigma = {:.2f}".format(mu, sigma))
V, t, ts = simulate(mu, sigma, dt=dt)
V_mean_th = V_mean(t, mu)
V_std_th = V_std(t, sigma, dt=dt)
plt.plot(t, V.mean(axis=1), 'b-', label=r'$\bar{V_m}$')
plt.plot(t + ts, V_mean_th, 'b--', label=r'$\langle V_m \rangle$')
plt.plot(t, V.std(axis=1), 'r-', label=r'$\sqrt{\bar{\Delta V_m^2}}$')
plt.plot(t + ts, V_std_th, 'r--', label=r'$\sqrt{\langle (\Delta V_m)^2 \rangle}$')
plt.legend()
plt.xlabel('Time $t$ [ms]')
plt.ylabel('Membrane potential $V_m$ [mV]')
plt.xlim(0, 50);
"""
Explanation: Again, agreement is fine and the slight drooping artefacts are invisible, since the noise is now updated on every time step. Note also that the noise standard deviation $\sigma$ is larger (by $\sqrt{10}$) than for $\delta=1$ ms.
End of explanation
"""
plt.plot(t, V[:, :25], lw=3, alpha=0.5);
plt.plot([31.1, 31.1], [-3, 3], 'k--', lw=2)
plt.plot([41.1, 41.1], [-3, 3], 'k--', lw=2)
plt.xlabel('Time $t$ [ms]')
plt.ylabel('Membrane potential $V_m$ [mV]')
plt.xlim(30, 42);
plt.ylim(-2.1, 2.1);
"""
Explanation: For $\delta=10$, i.e., a noise switching time equal to $\tau_m$, the drooping artefact becomes clearly visible. Note that our theory developed above only applies to the points at which the input current switches, i.e., at multiples of $\delta$, beginning with the arrival of the first current at the neuron (at delay plus one time step). At those points, agreement with theory is good.
Why does the standard deviation dip between current updates?
In the last case, where $\delta = \tau_m$, the dips in the membrane potential between changes in the noise current become quite large. They can be explained as follows. For large $\delta$, we have at the end of a $\delta$-interval for neuron $n$ membrane potential $V_n(t_{j})\approx I_{n,j-1}\tau/C$ and these values will be distributed across neurons with standard deviation $\sqrt{\langle (\Delta V_m)^2 \rangle}$. Then, input currents of all neurons switch to new values $I_{n,j}$ and the membrane potential of each neuron now evolves towards $V_n(t_{j+1})\approx I_{n,j}\tau/C$. Since current values are independent of each other, this means that membrane-potential trajectories criss-cross each other, constricting the variance of the membrane potential before they approach their new steady-state values, as illustrated below.
You should therefore use short switching times $\delta$.
End of explanation
"""
from statsmodels.tsa.stattools import acf
def V_autocorr(V_mean, V_std, dt=1., tau_m=10.):
'Returns autocorrelation of membrane potential and pertaining time axis.'
mu, sigma = noise_params(V_mean, V_std, dt=dt, tau_m=tau_m)
V, t, ts = simulate(mu, sigma, dt=dt, tau_m=tau_m, t_max=5000., N=20)
# drop the first second
V = V[t>1000., :]
# compute autocorrelation columnwise, then average over neurons
nlags = 1000
nt, nn = V.shape
acV = np.zeros((nlags+1, nn))
for c in range(V.shape[1]):
        acV[:, c] = acf(V[:, c], unbiased=True, nlags=nlags, fft=True)
acV = acV.mean(axis=1)
# time axis
dt = t[1] - t[0]
acT = np.arange(0, nlags+1) * dt
return acV, acT
acV_01, acT_01 = V_autocorr(0., 1., 0.1)
acV_10, acT_10 = V_autocorr(0., 1., 1.0)
acV_50, acT_50 = V_autocorr(0., 1., 5.0)
plt.plot(acT_01, acV_01, label=r'$\delta = 0.1$ms');
plt.plot(acT_10, acV_10, label=r'$\delta = 1.0$ms');
plt.plot(acT_50, acV_50, label=r'$\delta = 5.0$ms');
plt.xlim(0, 50);
plt.ylim(-0.1, 1.05);
plt.legend();
plt.xlabel(r'Delay $\tau$ [ms]')
plt.ylabel(r'$\langle V(t)V(t+\tau)\rangle$');
"""
Explanation: Autocorrelation
We briefly look at the autocorrelation of the membrane potential for three values of $\delta$.
End of explanation
"""
acV_t01, acT_t01 = V_autocorr(0., 1., 0.1, 1.)
acV_t05, acT_t05 = V_autocorr(0., 1., 0.1, 5.)
acV_t10, acT_t10 = V_autocorr(0., 1., 0.1, 10.)
plt.plot(acT_t01, acV_t01, label=r'$\tau_m = 1$ms');
plt.plot(acT_t05, acV_t05, label=r'$\tau_m = 5$ms');
plt.plot(acT_t10, acV_t10, label=r'$\tau_m = 10$ms');
plt.xlim(0, 50);
plt.ylim(-0.1, 1.05);
plt.legend();
plt.xlabel(r'Delay $\tau$ [ms]')
plt.ylabel(r'$\langle V(t)V(t+\tau)\rangle$');
"""
Explanation: We see that the autocorrelation is clearly dominated by the membrane time constant of $\tau_m=10$ ms. The switching time $\delta$ has a lesser effect, although it is noticeable for $\delta=5$ ms.
Different membrane time constants
To document the influence of the membrane time constant, we compute the autocorrelation function for three different $\tau_m$.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.23/_downloads/ead9220acec394667b95e490359e08e7/70_point_spread.ipynb | bsd-3-clause | import os.path as op
import numpy as np
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.simulation import simulate_stc, simulate_evoked
"""
Explanation: Corrupt known signal with point spread
The aim of this tutorial is to demonstrate how to put a known signal at
desired locations in a :class:mne.SourceEstimate and then corrupt the
signal with point-spread by applying a forward and inverse solution.
End of explanation
"""
seed = 42
# parameters for inverse method
method = 'sLORETA'
snr = 3.
lambda2 = 1.0 / snr ** 2
# signal simulation parameters
# do not add extra noise to the known signals
nave = np.inf
T = 100
times = np.linspace(0, 1, T)
dt = times[1] - times[0]
# Paths to MEG data
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-fwd.fif')
fname_inv = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-meg-fixed-inv.fif')
fname_evoked = op.join(data_path, 'MEG', 'sample',
'sample_audvis-ave.fif')
"""
Explanation: First, we set some parameters.
End of explanation
"""
fwd = mne.read_forward_solution(fname_fwd)
fwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True,
use_cps=False)
fwd['info']['bads'] = []
inv_op = read_inverse_operator(fname_inv)
raw = mne.io.read_raw_fif(op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw.fif'))
raw.set_eeg_reference(projection=True)
events = mne.find_events(raw)
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2}
epochs = mne.Epochs(raw, events, event_id, baseline=(None, 0), preload=True)
epochs.info['bads'] = []
evoked = epochs.average()
labels = mne.read_labels_from_annot('sample', subjects_dir=subjects_dir)
label_names = [label.name for label in labels]
n_labels = len(labels)
"""
Explanation: Load the MEG data
End of explanation
"""
cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)
"""
Explanation: Estimate the background noise covariance from the baseline period
End of explanation
"""
# The known signal is zero everywhere except in the two labels of interest
signal = np.zeros((n_labels, T))
idx = label_names.index('inferiorparietal-lh')
signal[idx, :] = 1e-7 * np.sin(5 * 2 * np.pi * times)
idx = label_names.index('rostralmiddlefrontal-rh')
signal[idx, :] = 1e-7 * np.sin(7 * 2 * np.pi * times)
"""
Explanation: Generate sinusoids in two spatially distant labels
End of explanation
"""
hemi_to_ind = {'lh': 0, 'rh': 1}
for i, label in enumerate(labels):
# The `center_of_mass` function needs labels to have values.
labels[i].values.fill(1.)
# Restrict the eligible vertices to be those on the surface under
# consideration and within the label.
surf_vertices = fwd['src'][hemi_to_ind[label.hemi]]['vertno']
restrict_verts = np.intersect1d(surf_vertices, label.vertices)
com = labels[i].center_of_mass(subject='sample',
subjects_dir=subjects_dir,
restrict_vertices=restrict_verts,
surf='white')
# Convert the center of vertex index from surface vertex list to Label's
# vertex list.
cent_idx = np.where(label.vertices == com)[0][0]
# Create a mask with 1 at center vertex and zeros elsewhere.
labels[i].values.fill(0.)
labels[i].values[cent_idx] = 1.
"""
Explanation: Find the center vertices in source space of each label
We want the known signal in each label to only be active at the center. We
create a mask for each label that is 1 at the center vertex and 0 at all
other vertices in the label. This mask is then used when simulating
source-space data.
End of explanation
"""
stc_gen = simulate_stc(fwd['src'], labels, signal, times[0], dt,
value_fun=lambda x: x)
"""
Explanation: Create source-space data with known signals
Put known signals onto surface vertices using the array of signals and
the label masks (stored in labels[i].values).
End of explanation
"""
kwargs = dict(subjects_dir=subjects_dir, hemi='split', smoothing_steps=4,
time_unit='s', initial_time=0.05, size=1200,
views=['lat', 'med'])
clim = dict(kind='value', pos_lims=[1e-9, 1e-8, 1e-7])
brain_gen = stc_gen.plot(clim=clim, **kwargs)
"""
Explanation: Plot original signals
Note that the original signals are highly concentrated (point) sources.
End of explanation
"""
evoked_gen = simulate_evoked(fwd, stc_gen, evoked.info, cov, nave,
random_state=seed)
# Map the simulated sensor-space data to source-space using the inverse
# operator.
stc_inv = apply_inverse(evoked_gen, inv_op, lambda2, method=method)
"""
Explanation: Simulate sensor-space signals
Use the forward solution and add Gaussian noise to simulate sensor-space
(evoked) data from the known source-space signals. The amount of noise is
controlled by nave (higher values imply less noise).
End of explanation
"""
brain_inv = stc_inv.plot(**kwargs)
"""
Explanation: Plot the point-spread of corrupted signal
Notice that after applying the forward and inverse operators to the known
point sources, the point sources have spread across the source space.
This spread is a consequence of the minimum-norm solution: the signal leaks
to nearby vertices with similar orientations, so the estimate ends up
crossing sulci and gyri.
End of explanation
"""
|
superbobry/pymc3 | pymc3/examples/rugby_analytics.ipynb | apache-2.0 | !date
import numpy as np
import pandas as pd
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
%matplotlib inline
import pymc3 as pm, theano.tensor as tt
"""
Explanation: A Hierarchical model for Rugby prediction
@Author: Peadar Coyle
@email: [email protected]
@date: 31/12/15
I came across the following blog post on http://danielweitzenfeld.github.io/passtheroc/blog/2014/10/28/bayes-premier-league/
* Based on the work of Baio and Blangiardo
In this example, we're going to reproduce the first model described in the paper using PyMC3.
Since I am a rugby fan, I decided to apply the results of the Bayesian football paper to the Six Nations.
Rugby is a physical sport popular worldwide.
* Six Nations consists of Italy, Ireland, Scotland, England, France and Wales
* Game consists of scoring tries (similar to touch downs) or kicking the goal.
* Average player is something like 100kg and 1.82m tall.
* Paul O'Connell, the Irish captain, is 6' 6" (1.98 m) tall and weighs 243 lbs (110 kg).
We will use a data set consisting only of the Six Nations 2014 results, and use it to build a generative and explainable model for the Six Nations 2015.
Motivation
Your estimate of the strength of a team depends on your estimates of the other strengths
Ireland are a stronger team than Italy for example - but by how much?
The source for the 2014 results is Wikipedia.
I handcrafted these results
Small data
* We want to infer a latent parameter, the 'strength' of a team, based only on its scoring intensity. All we have are scores and results; we cannot measure a team's 'strength' directly.
* Probabilistic Programming is a brilliant paradigm for modeling these latent parameters
End of explanation
"""
data_csv = StringIO("""home_team,away_team,home_score,away_score
Wales,Italy,23,15
France,England,26,24
Ireland,Scotland,28,6
Ireland,Wales,26,3
Scotland,England,0,20
France,Italy,30,10
Wales,France,27,6
Italy,Scotland,20,21
England,Ireland,13,10
Ireland,Italy,46,7
Scotland,France,17,19
England,Wales,29,18
Italy,England,11,52
Wales,Scotland,51,3
France,Ireland,20,22""")
"""
Explanation: This is a Rugby prediction exercise. So we'll input some data
End of explanation
"""
df = pd.read_csv(data_csv)
teams = df.home_team.unique()
teams = pd.DataFrame(teams, columns=['team'])
teams['i'] = teams.index
df = pd.merge(df, teams, left_on='home_team', right_on='team', how='left')
df = df.rename(columns = {'i': 'i_home'}).drop('team', 1)
df = pd.merge(df, teams, left_on='away_team', right_on='team', how='left')
df = df.rename(columns = {'i': 'i_away'}).drop('team', 1)
observed_home_goals = df.home_score.values
observed_away_goals = df.away_score.values
home_team = df.i_home.values
away_team = df.i_away.values
num_teams = len(df.i_home.drop_duplicates())
num_games = len(home_team)
g = df.groupby('i_away')
att_starting_points = np.log(g.away_score.mean())
g = df.groupby('i_home')
def_starting_points = -np.log(g.away_score.mean())
"""
Explanation: What do we want to infer?
We want to infer the latent parameters (every team's strength) that are generating the data we observe (the scorelines).
Moreover, we know that the scorelines are a noisy measurement of team strength, so ideally, we want a model that makes it easy to quantify our uncertainty about the underlying strengths.
Often we cannot write down the posterior distribution explicitly, so we have to estimate it.
If we can't solve something, approximate it.
Markov-Chain Monte Carlo (MCMC) instead draws samples from the posterior.
Fortunately, this algorithm can be applied to almost any model.
What do we want?
We want to quantify our uncertainty
We want to also use this to generate a model
We want the answers as distributions not point estimates
What assumptions do we know for our 'generative story'?
We know that the Six Nations in Rugby only has 6 teams - they each play each other once
We have data from last year!
We also know that in sports scoring is modelled as a Poisson distribution
We consider home advantage to be a strong effect in sports
The model.
The league is made up of a total of T = 6 teams, playing each other once
in a season. We denote the number of points scored by the home and the away team in the g-th game of the season (15 games) as $y_{g1}$ and $y_{g2}$ respectively.
The vector of observed counts $\mathbf{y} = (y_{g1}, y_{g2})$ is modelled as independent Poisson:
$y_{gj} \mid \theta_{gj} \sim \mathrm{Poisson}(\theta_{gj}), \quad j = 1, 2$
where the theta parameters represent the scoring intensity in the g-th game for the team playing at home (j=1) and away (j=2), respectively.
We model these parameters according to a formulation that has been used widely in the statistical literature, assuming a log-linear random effect model:
$$\log \theta_{g1} = home + att_{h(g)} + def_{a(g)} $$
$$\log \theta_{g2} = att_{a(g)} + def_{h(g)}$$
The parameter home represents the advantage for the team hosting the game and we assume that this effect is constant for all the teams and throughout the season
The scoring intensity is determined jointly by the attack and defense ability of the two teams involved, represented by the parameters att and def, respectively
Conversely, for each t = 1, ..., T, the team-specific effects are modelled as exchangeable from a common distribution:
$att_{t} \sim \mathrm{Normal}(\mu_{att},\tau_{att})$ and $def_{t} \sim \mathrm{Normal}(\mu_{def},\tau_{def})$
End of explanation
"""
with pm.Model() as model:
# global model parameters
home = pm.Normal('home', 0, .0001)
tau_att = pm.Gamma('tau_att', .1, .1)
tau_def = pm.Gamma('tau_def', .1, .1)
intercept = pm.Normal('intercept', 0, .0001)
# team-specific model parameters
atts_star = pm.Normal("atts_star",
mu =0,
tau =tau_att,
shape=num_teams)
defs_star = pm.Normal("defs_star",
mu =0,
tau =tau_def,
shape=num_teams)
atts = pm.Deterministic('atts', atts_star - tt.mean(atts_star))
defs = pm.Deterministic('defs', defs_star - tt.mean(defs_star))
    home_theta = tt.exp(intercept + home + atts[home_team] + defs[away_team])
    away_theta = tt.exp(intercept + atts[away_team] + defs[home_team])
# likelihood of observed data
home_points = pm.Poisson('home_points', mu=home_theta, observed=observed_home_goals)
away_points = pm.Poisson('away_points', mu=away_theta, observed=observed_away_goals)
"""
Explanation: We did some munging above and adjustments of the data to make it tidier for our model.
Applying the log function to the away and home scores is a standard trick in the sports analytics literature.
Building of the model
We now build the model in PyMC3, specifying the global parameters, the team-specific parameters and the likelihood function
End of explanation
"""
with model:
start = pm.find_MAP()
    step = pm.NUTS(scaling=start)
trace = pm.sample(2000, step, start=start, progressbar=True)
pm.traceplot(trace)
"""
Explanation: We specified the model and the likelihood function
All this runs on a Theano graph under the hood
Now we find the Maximum A Posteriori (MAP) estimate to decide where to start our No-U-Turn Sampler (NUTS)
End of explanation
"""
# Team indices follow df.home_team.unique(): Wales, France, Ireland, Scotland, Italy, England
pm.forestplot(trace, vars=['atts'], ylabels=['Wales', 'France', 'Ireland', 'Scotland', 'Italy', 'England'], main="Team Offense")
pm.forestplot(trace, vars=['defs'], ylabels=['Wales', 'France', 'Ireland', 'Scotland', 'Italy', 'England'], main="Team Defense")
"""
Explanation: Results
From the above we can start to understand the different distributions of attacking strength and defensive strength.
These are probabilistic estimates and help us better understand the uncertainty in sports analytics
End of explanation
"""
df_trace = pm.trace_to_dataframe(trace[:1000])
import seaborn as sns
df_trace_att = df_trace[['atts_star__0','atts_star__1',
'atts_star__2',
'atts_star__3',
'atts_star__4',
'atts_star__5']]
df_trace_att.rename(columns={'atts_star__0':'atts_star_wales','atts_star__1':'atts_star_france',
                             'atts_star__2':'atts_star_ireland',
                             'atts_star__3':'atts_star_scotland',
                             'atts_star__4':'atts_star_italy',
                             'atts_star__5':'atts_star_england'}, inplace=True)
_ = sns.pairplot(df_trace_att)
"""
Explanation: Covariates.
We should also do some exploration of the variables.
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | quests/dei/xgboost_caip_e2e.ipynb | apache-2.0 | #You'll need to install XGBoost on the TF instance
!pip3 install xgboost==0.90 witwidget --user --quiet
"""
Explanation: Cloud AI Platform + What-if Tool: end-to-end XGBoost example
This notebook shows how to:
* Build a binary classification model with XGBoost trained on a mortgage dataset
* Deploy the model to Cloud AI Platform
* Use the What-if Tool on your deployed model
End of explanation
"""
import pandas as pd
import xgboost as xgb
import numpy as np
import collections
import witwidget
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.utils import shuffle
from witwidget.notebook.visualization import WitWidget, WitConfigBuilder
"""
Explanation: After doing a pip install, restart your kernel by selecting kernel from the menu and clicking Restart Kernel before proceeding further
End of explanation
"""
# Use a small subset of the data since the original dataset is too big for Colab (2.5GB)
# Data source: https://www.ffiec.gov/hmda/hmdaflat.htm
!gsutil cp gs://mortgage_dataset_files/mortgage-small.csv .
# Set column dtypes for Pandas
COLUMN_NAMES = collections.OrderedDict({
'as_of_year': np.int16,
'agency_code': 'category',
'loan_type': 'category',
'property_type': 'category',
'loan_purpose': 'category',
'occupancy': np.int8,
'loan_amt_thousands': np.float64,
'preapproval': 'category',
'county_code': np.float64,
'applicant_income_thousands': np.float64,
'purchaser_type': 'category',
'hoepa_status': 'category',
'lien_status': 'category',
'population': np.float64,
'ffiec_median_fam_income': np.float64,
'tract_to_msa_income_pct': np.float64,
'num_owner_occupied_units': np.float64,
'num_1_to_4_family_units': np.float64,
'approved': np.int8
})
# Load data into Pandas
data = pd.read_csv(
'mortgage-small.csv',
index_col=False,
dtype=COLUMN_NAMES
)
data = data.dropna()
data = shuffle(data, random_state=2)
data.head()
# Label preprocessing
labels = data['approved'].values
# See the distribution of approved / denied classes (0: denied, 1: approved)
print(data['approved'].value_counts())
data = data.drop(columns=['approved'])
# Convert categorical columns to dummy columns
dummy_columns = list(data.dtypes[data.dtypes == 'category'].index)
data = pd.get_dummies(data, columns=dummy_columns)
# Preview the data
data.head()
"""
Explanation: Download and pre-process data
In this section we'll:
* Download a subset of the mortgage dataset from Google Cloud Storage
* Because XGBoost requires all columns to be numerical, we'll convert all categorical columns to dummy columns (0 or 1 values for each possible category value)
* Note that we've already done some pre-processing on the original dataset to convert value codes to strings: for example, an agency code of 1 becomes Office of the Comptroller of the Currency (OCC)
End of explanation
"""
# Split the data into train / test sets
x,y = data,labels
x_train,x_test,y_train,y_test = train_test_split(x,y)
# Train the model, this will take a few minutes to run
bst = xgb.XGBClassifier(
objective='reg:logistic'
)
bst.fit(x_train, y_train)
# Get predictions on the test set and print the accuracy score
y_pred = bst.predict(x_test)
acc = accuracy_score(y_test, y_pred.round())
print(acc, '\n')
# Print a confusion matrix
print('Confusion matrix:')
cm = confusion_matrix(y_test, y_pred.round())
cm = cm / cm.sum(axis=1, keepdims=True)  # normalize each row (true class) to sum to 1
print(cm)
# Save the model so we can deploy it
bst.save_model('model.bst')
"""
Explanation: Train the XGBoost model
End of explanation
"""
GCP_PROJECT = 'YOUR_GCP_PROJECT'
MODEL_BUCKET = 'gs://your_storage_bucket'
MODEL_NAME = 'your_model_name' # You'll create this model below
VERSION_NAME = 'v1'
# Copy your model file to Cloud Storage
!gsutil cp ./model.bst $MODEL_BUCKET
# Configure gcloud to use your project
!gcloud config set project $GCP_PROJECT
# Create a model
!gcloud ai-platform models create $MODEL_NAME --regions us-central1
# Create a version, this will take ~2 minutes to deploy
!gcloud ai-platform versions create $VERSION_NAME \
--model=$MODEL_NAME \
--framework='XGBOOST' \
--runtime-version=1.15 \
--origin=$MODEL_BUCKET \
--staging-bucket=$MODEL_BUCKET \
--python-version=3.7 \
--project=$GCP_PROJECT \
--region=global
"""
Explanation: Deploy model to AI Platform
Copy your saved model file to Cloud Storage and deploy the model to AI Platform. In order for this to work, you'll need the Cloud AI Platform Models API enabled. Update the values in the next cell with the info for your GCP project: replace GCP_PROJECT with the GCP Project ID shown in the left pane of the Qwiklabs lab page, replace MODEL_BUCKET with gs:// followed by your BucketName value, and replace MODEL_NAME with a name for your model.
End of explanation
"""
# Format a subset of the test data to send to the What-if Tool for visualization
# Append ground truth label value to training data
# This is the number of examples you want to display in the What-if Tool
num_wit_examples = 500
test_examples = np.hstack((x_test[:num_wit_examples].values,y_test[:num_wit_examples].reshape(-1,1)))
# Create a What-if Tool visualization, it may take a minute to load
# See the cell below this for exploration ideas
# This prediction adjustment function is needed as this xgboost model's
# prediction returns just a score for the positive class of the binary
# classification, whereas the What-If Tool expects a list of scores for each
# class (in this case, both the negative class and the positive class).
def adjust_prediction(pred):
return [1 - pred, pred]
config_builder = (WitConfigBuilder(test_examples.tolist(), data.columns.tolist() + ['mortgage_status'])
.set_ai_platform_model(GCP_PROJECT, MODEL_NAME, VERSION_NAME, adjust_prediction=adjust_prediction)
.set_target_feature('mortgage_status')
.set_label_vocab(['denied', 'approved']))
WitWidget(config_builder, height=800)
"""
Explanation: Using the What-if Tool to interpret your model
Once your model has deployed, you're ready to connect it to the What-if Tool using the WitWidget.
Note: You can ignore the message TypeError(unsupported operand type(s) for -: 'int' and 'list') while creating a What-if Tool visualization.
End of explanation
"""
|
gururajl/deep-learning | gan_mnist/Intro_to_GANs_Solution.ipynb | mit | %matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
"""
Explanation: Generative Adversarial Network
In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!
GANs were first introduced in 2014 by Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:
Pix2Pix
CycleGAN
A whole list
The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator: it wants to output data that looks as close as possible to real data. The discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator.
The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to construct its fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.
The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates a real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.
End of explanation
"""
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real')
inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
return inputs_real, inputs_z
"""
Explanation: Model Inputs
First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.
End of explanation
"""
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
with tf.variable_scope('generator', reuse=reuse):
# Hidden layer
h1 = tf.layers.dense(z, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1, h1)
# Logits and tanh output
logits = tf.layers.dense(h1, out_dim, activation=None)
out = tf.tanh(logits)
return out
"""
Explanation: Generator network
Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.
Variable Scope
Here we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.
We could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.
To use tf.variable_scope, you use a with statement:
python
with tf.variable_scope('scope_name', reuse=False):
# code here
Here's more from the TensorFlow documentation to get another look at using tf.variable_scope.
Leaky ReLU
TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one. For this you can take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:
$$
f(x) = max(\alpha * x, x)
$$
Tanh Output
The generator has been found to perform best with $\tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.
End of explanation
"""
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
with tf.variable_scope('discriminator', reuse=reuse):
# Hidden layer
h1 = tf.layers.dense(x, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1, h1)
logits = tf.layers.dense(h1, 1, activation=None)
out = tf.sigmoid(logits)
return out, logits
"""
Explanation: Discriminator
The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.
End of explanation
"""
# Size of input image to discriminator
input_size = 784
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Smoothing
smooth = 0.1
"""
Explanation: Hyperparameters
End of explanation
"""
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Build the model
g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)
# g_model is the generator output
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, n_units=d_hidden_size, alpha=alpha)
"""
Explanation: Build network
Now we're building the network from the functions defined above.
First, we get our inputs, input_real and input_z, from model_inputs using the sizes of the input and z.
Then, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.
Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).
End of explanation
"""
# Calculate losses
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
labels=tf.ones_like(d_logits_real) * (1 - smooth)))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.zeros_like(d_logits_real)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.ones_like(d_logits_fake)))
"""
Explanation: Discriminator and Generator Losses
Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will be sigmoid cross-entropies, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like
python
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
For the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)
The discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.
Finally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants the discriminator to output ones for fake images.
End of explanation
"""
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
"""
Explanation: Optimizers
We want to update the generator and discriminator variables separately. So we need to get the variables for each part build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.
For the generator optimizer, we only want to generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables to start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance).
We can do something similar with the discriminator. All the variables in the discriminator start with discriminator.
Then, in the optimizer we pass the variable lists to var_list in the minimize method. This tells the optimizer to only update the listed variables. Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.
End of explanation
"""
batch_size = 100
epochs = 100
samples = []
losses = []
# Only save generator variables
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images, reshape and rescale to pass to D
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images*2 - 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
# At the end of each epoch, get the losses and print them out
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# Sample from generator as we're training for viewing afterwards
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
"""
Explanation: Training
End of explanation
"""
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
"""
Explanation: Training loss
Here we'll check out the training losses for the generator and discriminator.
End of explanation
"""
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')
return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
"""
Explanation: Generator samples from training
Here we can view samples of images from the generator. First we'll look at images taken while training.
End of explanation
"""
_ = view_samples(-1, samples)
"""
Explanation: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 1, 7, 3, 2. Since this is just a sample, it isn't representative of the full range of images this generator can make.
End of explanation
"""
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28,28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
"""
Explanation: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
End of explanation
"""
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
_ = view_samples(0, [gen_samples])
"""
Explanation: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise like 1s and 9s.
Sampling from the generator
We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!
End of explanation
"""
|
lcharleux/numerical_analysis | doc/ODE/ODE_harmonic_oscillator.ipynb | gpl-2.0 | # Setup
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
# Setup
f0 = 1.
omega0 = 2. * np.pi * f0
a = 1.
"""
Explanation: Ordinary Differential Equations : Practical work on the harmonic oscillator
In this example, you will simulate an harmonic oscillator and compare the numerical solution to the closed form one.
Theory
Read about the theory of harmonic oscillators on Wikipedia
Mechanical oscillator
The case of the one dimensional mechanical oscillator leads to the following equation:
$$
m \ddot x + \mu \dot x + k x = m \ddot x_d
$$
Where:
$x$ is the position,
$\dot x$ and $\ddot x$ are respectively the speed and acceleration,
$m$ is the mass,
$\mu$ the damping coefficient,
$k$ the stiffness,
and $\ddot x_d$ the driving acceleration, which is zero if the oscillator is free.
Canonical equation
Most 1D oscilators follow the same canonical equation:
$$
\ddot x + 2 \zeta \omega_0 \dot x + \omega_0^2 x = \ddot x_d
$$
Where:
$\omega_0$ is the undamped pulsation,
$\zeta$ is damping ratio,
$\ddot x_d$ is the imposed acceleration.
In the case of the mechanical oscillator:
$$
\omega_0 = \sqrt{\dfrac{k}{m}}
$$
$$
\zeta = \dfrac{\mu}{2\sqrt{mk}}
$$
Undampened oscillator
First, you will focus on the case of an undamped free oscillator ($\zeta = 0$, $\ddot x_d = 0$) with the following initial conditions:
$$
\left \lbrace
\begin{split}
x(t = 0) = 1 \
\dot x(t = 0) = 0
\end{split}\right.
$$
The closed form solution is:
$$
x(t) = a\cos \omega_0 t
$$
End of explanation
"""
# Complete here
#t =
#xth =
"""
Explanation: Part 1: theoretical solution
Plot the closed form solution of the undamped free oscillator for 5 periods.
Steps:
Create an array $t$ representing time,
Create a function $x_{th}$ representing the amplitude of the closed form solution,
Plot $x_{th}$ vs $t$.
End of explanation
"""
|
Diyago/Machine-Learning-scripts | statistics/stat.bootstrap_intervals.ipynb | apache-2.0 | import numpy as np
import pandas as pd
%pylab inline
"""
Explanation: Confidence intervals based on the bootstrap
End of explanation
"""
data = pd.read_csv('verizon.txt', sep='\t')
data.shape
data.head()
data.Group.value_counts()
pylab.figure(figsize(12, 5))
pylab.subplot(1,2,1)
pylab.hist(data[data.Group == 'ILEC'].Time, bins = 20, color = 'b', range = (0, 100), label = 'ILEC')
pylab.legend()
pylab.subplot(1,2,2)
pylab.hist(data[data.Group == 'CLEC'].Time, bins = 20, color = 'r', range = (0, 100), label = 'CLEC')
pylab.legend()
pylab.show()
"""
Explanation: Загрузка данных
Время ремонта телекоммуникаций
Verizon — основная региональная телекоммуникационная компания (Incumbent Local Exchange Carrier, ILEC) в западной
части США. В связи с этим данная компания обязана предоставлять сервис ремонта телекоммуникационного оборудования
не только для своих клиентов, но и для клиентов других локальных телекоммуникационых компаний (Competing Local Exchange Carriers, CLEC). При этом в случаях, когда время ремонта оборудования для клиентов других компаний существенно выше, чем для собственных, Verizon может быть оштрафована.
End of explanation
"""
def get_bootstrap_samples(data, n_samples):
indices = np.random.randint(0, len(data), (n_samples, len(data)))
samples = data[indices]
return samples
def stat_intervals(stat, alpha):
boundaries = np.percentile(stat, [100 * alpha / 2., 100 * (1 - alpha / 2.)])
return boundaries
"""
Explanation: Bootstrap
End of explanation
"""
ilec_time = data[data.Group == 'ILEC'].Time.values
clec_time = data[data.Group == 'CLEC'].Time.values
np.random.seed(0)
ilec_median_scores = map(np.median, get_bootstrap_samples(ilec_time, 1000))
clec_median_scores = map(np.median, get_bootstrap_samples(clec_time, 1000))
print "95% confidence interval for the ILEC median repair time:", stat_intervals(ilec_median_scores, 0.05)
print "95% confidence interval for the CLEC median repair time:", stat_intervals(clec_median_scores, 0.05)
"""
Explanation: Interval estimate of the median
End of explanation
"""
print "difference between medians:", np.median(clec_time) - np.median(ilec_time)
"""
Explanation: Point estimate of the difference between medians
End of explanation
"""
delta_median_scores = map(lambda x: x[1] - x[0], zip(ilec_median_scores, clec_median_scores))
print "95% confidence interval for the difference between medians", stat_intervals(delta_median_scores, 0.05)
"""
Explanation: Interval estimate of the difference between medians
End of explanation
"""
|
braemy/mentor-mentee-recommender-system | 2.Topics.ipynb | mit | #Uncomment this cell if you don't have the data on your computer
#nltk.download("stopwords")
#nltk.download("wordnet")
"""
Explanation: Preprocessing
For the topic extraction part we will use the dictionary of author->list_of_publications collected in the previous step. We need to do some preprocessing first
We use the utils.simple_preprocess function from gensim to return a list of lowered tokenized word
We stem each word
filter out the stopwords.
End of explanation
"""
english_stop_words = ["a", "about", "above", "above", "across", "after", "afterwards", "again", "against", "all", "almost", "alone", "along", "already", "also","although","always","am","among", "amongst", "amoungst", "amount", "an", "and", "another", "any","anyhow","anyone","anything","anyway", "anywhere", "are", "around", "as", "at", "back","be","became", "because","become","becomes", "becoming", "been", "before", "beforehand", "behind", "being", "below", "beside", "besides", "between", "beyond", "bill", "both", "bottom","but", "by", "call", "can", "cannot", "cant", "co", "con", "could", "couldnt", "cry", "de", "describe", "detail", "do", "done", "down", "due", "during", "each", "eg", "eight", "either", "eleven","else", "elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone", "everything", "everywhere", "except", "few", "fifteen", "fify", "fill", "find", "fire", "first", "five", "for", "former", "formerly", "forty", "found", "four", "from", "front", "full", "further", "get", "give", "go", "had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter", "hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his", "how", "however", "hundred", "ie", "if", "in", "inc", "indeed", "interest", "into", "is", "it", "its", "itself", "keep", "last", "latter", "latterly", "least", "less", "ltd", "made", "many", "may", "me", "meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly", "move", "much", "must", "my", "myself", "name", "namely", "neither", "never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone", "nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on", "once", "one", "only", "onto", "or", "other", "others", "otherwise", "our", "ours", "ourselves", "out", "over", "own","part", "per", "perhaps", "please", "put", "rather", "re", "same", "see", "seem", "seemed", "seeming", "seems", "serious", "several", "she", "should", "show", "side", "since", "sincere", "six", "sixty", "so", "some", "somehow", "someone", "something", "sometime", "sometimes", "somewhere", "still", "such", "system", "take", "ten", "than", "that", "the", "their", "them", "themselves", "then", "thence", "there", "thereafter", "thereby", "therefore", "therein", "thereupon", "these", "they", "thickv", "thin", "third", "this", "those", "though", "three", "through", "throughout", "thru", "thus", "to", "together", "too", "top", "toward", "towards", "twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us", "very", "via", "was", "we", "well", "were", "what", "whatever", "when", "whence", "whenever", "where", "whereafter", "whereas", "whereby", "wherein", "whereupon", "wherever", "whether", "which", "while", "whither", "who", "whoever", "whole", "whom", "whose", "why", "will", "with", "within", "without", "would", "yet", "you", "your", "yours", "yourself", "yourselves", "the",'like', 'think', 'know', 'want', 'sure', 'thing', 'send', 'sent', 'speech', 'print', 'time','want', 'said', 'maybe', 'today', 'tomorrow', 'thank', 'thanks']
specific_stop_words = ['base', 'use', 'model', 'process', 'network']
sw =stopwords.words('english') + english_stop_words + specific_stop_words
"""
Explanation: For the stop words we use the ones given by NLTK. This set seems small, so we also include other common English stop words found online or in the titles.
End of explanation
"""
lemmatizer = WordNetLemmatizer()
stemmer = EnglishStemmer()
print("Stemmer",stemmer.stem("Algorithm"), stemmer.stem("Algorithmic"))
print("Lemmatizer",lemmatizer.lemmatize("algorithm"), lemmatizer.lemmatize("Algorithmic"))
"""
Explanation: We decide to use a stemmer and not a lemmatizer (from nltk). The reason is that we want to group together words with the same meaning. For example if one publication contains algorithm and another one contains Algorithmic in this case it would help to map those 2 words to the same. Let's see the output of a stemmer and lemmatizer. Even if our model should be able to capture the similitude among those 2 words, it will help reduce the vocabulary and speed up the training
End of explanation
"""
def pre_processing(titles):
list_of_tokens = []
for title in titles:
tokens = utils.simple_preprocess(title)
tokens = [stemmer.stem(x) for x in tokens]
tokens = list(filter(lambda t: t not in sw, tokens))
list_of_tokens.append(tokens)
return list_of_tokens
authorID_to_titles_stem = {id_: pre_processing(titles) for id_, titles in tqdm(authorID_to_titles.items())}
utl.pickle_data(authorID_to_titles_stem, "../pmi_data/authorID_to_titles_stem.p")
"""
Explanation: Indeed, the lemmatizer keeps two different words. Let's use the stemmer.
End of explanation
"""
authorID_to_titles_stem = utl.load_pickle("../pmi_data/authorID_to_titles_stem.p")
authorID_to_document = dict()
for author, titles in tqdm(authorID_to_titles_stem.items()):
authorID_to_document[author] = []
for t in titles:
authorID_to_document[author].extend(t)
"""
Explanation: Topic Extraction
We want to extract the k main topics across all the publications. Then, for each author, we will compute a score in each one of those topics.
We use Latent Dirichlet Allocation and the implementation provided by Gensim.
Latent Dirichlet allocation (LDA)
The principle behind LDA is that if you have a collection of documents, each document represents a mixture of topics. This means that a document contains words that belong to different categories. The goal of LDA is to retrieve the sets of words that were used to create the documents.
Extraction
We have a dictionary of authorID -> list(list(tokens)), with the inner lists representing the titles.
The LDA implementation of gensim takes as parameters:
- a dictionary token -> id
- a list of lists of (token, token_count)
We use 2 functions provided by Gensim
Since we are dealing with titles, most words occur only once in a given title. All words then have the same importance, and it is hard for the algorithm to infer the probability p(topics | title).
Since we want to find the set of topics that represent an author, we have already made the assumption that all the publications of one author should fall in a subset of topics. So let's put all the publications of one author together, as if they formed one big document.
End of explanation
"""
dictionary = corpora.Dictionary([doc for doc in tqdm(authorID_to_document.values())])
corpus = [dictionary.doc2bow(doc) for doc in tqdm(authorID_to_document.values())]
"""
Explanation: Now we have a mapping of author -> document. We can build the dictionary and transform each document into a list of (token, token_count).
End of explanation
"""
#parameters
num_topics = 20 # number of topics LDA has to select
passes = 1 # number of passes in the LDA training
num_words = 5 # number of most important words per topic to be printed
# Train only on the larger documents (authors with more than 100 distinct tokens)
c = [doc for doc in tqdm(corpus) if len(doc) > 100]
len(c)
start = time()
pp = pprint.PrettyPrinter(depth=2)
lda = models.LdaModel(c, num_topics=num_topics, id2word = dictionary, passes=passes)
print("Training time:", round((time()-start)/60,2),"[min]")
pp.pprint(lda.print_topics(lda.num_topics, num_words=num_words))
lda.save('lda.model')
utl.pickle_data(lda, "../pmi_data/lda_model__20_100.p")
def compute_score(titles):
total_score = np.zeros(num_topics)
for title in titles:
#lda output : [(id1, score1), (id2, score2),... if id != 0]
for id_, value in lda[dictionary.doc2bow(title)]:
total_score[id_] += value
return total_score
score_by_author_by_document = [compute_score([doc]) for _, doc in tqdm(authorID_to_document.items())]
utl.pickle_data(score_by_author_by_document, "../pmi_data/score_by_author_by_document.p")
score_by_author_by_titles = [compute_score(titles) for _, titles in tqdm(authorID_to_titles_stem.items())]
utl.pickle_data(score_by_author_by_titles,"../pmi_data/score_by_author_by_titles.p")
"""
Explanation: Set up the parameters; we select 20 topics.
End of explanation
"""
|
mit-crpg/openmc | examples/jupyter/nuclear-data.ipynb | mit | %matplotlib inline
import os
from pprint import pprint
import shutil
import subprocess
import urllib.request
import h5py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm
from matplotlib.patches import Rectangle
import openmc.data
"""
Explanation: Nuclear Data
In this notebook, we will go through the salient features of the openmc.data package in the Python API. This package enables inspection, analysis, and conversion of nuclear data from ACE files. Most importantly, the package provides a means to generate HDF5 nuclear data libraries that are used by the transport solver.
End of explanation
"""
openmc.data.atomic_mass('Fe54')
openmc.data.NATURAL_ABUNDANCE['H2']
openmc.data.atomic_weight('C')
"""
Explanation: Physical Data
Some very helpful physical data is available as part of openmc.data: atomic masses, natural abundances, and atomic weights.
End of explanation
"""
url = 'https://anl.box.com/shared/static/kxm7s57z3xgfbeq29h54n7q6js8rd11c.ace'
filename, headers = urllib.request.urlretrieve(url, 'gd157.ace')
# Load ACE data into object
gd157 = openmc.data.IncidentNeutron.from_ace('gd157.ace')
gd157
"""
Explanation: The IncidentNeutron class
The most useful class within the openmc.data API is IncidentNeutron, which stores continuous-energy incident neutron data. This class has factory methods from_ace, from_endf, and from_hdf5 which take a data file on disk and parse it into a hierarchy of classes in memory. To demonstrate this feature, we will download an ACE file (which can be produced with NJOY 2016) and then load it in using the IncidentNeutron.from_ace method.
End of explanation
"""
total = gd157[1]
total
"""
Explanation: Cross sections
From Python, it's easy to explore (and modify) the nuclear data. Let's start off by reading the total cross section. Reactions are indexed using their "MT" number -- a unique identifier for each reaction defined by the ENDF-6 format. The MT number for the total cross section is 1.
End of explanation
"""
total.xs
"""
Explanation: Cross sections for each reaction can be stored at multiple temperatures. To see what temperatures are available, we can look at the reaction's xs attribute.
End of explanation
"""
total.xs['294K'](1.0)
"""
Explanation: To find the cross section at a particular energy, 1 eV for example, simply get the cross section at the appropriate temperature and then call it as a function. Note that our nuclear data uses eV as the unit of energy.
End of explanation
"""
total.xs['294K']([1.0, 2.0, 3.0])
"""
Explanation: The xs attribute can also be called on an array of energies.
End of explanation
"""
gd157.energy
energies = gd157.energy['294K']
total_xs = total.xs['294K'](energies)
plt.loglog(energies, total_xs)
plt.xlabel('Energy (eV)')
plt.ylabel('Cross section (b)')
"""
Explanation: A quick way to plot cross sections is to use the energy attribute of IncidentNeutron. This gives an array of all the energy values used in cross section interpolation for each temperature present.
End of explanation
"""
pprint(list(gd157.reactions.values())[:10])
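# Hedged aside (not part of the original notebook): reactions is an ordinary
# dict keyed by MT number, so it can be inspected like any other dict.
mt_numbers = sorted(gd157.reactions.keys())
print('{} reactions; first few MT numbers: {}'.format(len(mt_numbers), mt_numbers[:10]))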
"""
Explanation: Reaction Data
Most of the interesting data for an IncidentNeutron instance is contained within the reactions attribute, which is a dictionary mapping MT values to Reaction objects.
End of explanation
"""
n2n = gd157[16]
print('Threshold = {} eV'.format(n2n.xs['294K'].x[0]))
"""
Explanation: Let's suppose we want to look more closely at the (n,2n) reaction. This reaction has an energy threshold
End of explanation
"""
n2n.xs
xs = n2n.xs['294K']
plt.plot(xs.x, xs.y)
plt.xlabel('Energy (eV)')
plt.ylabel('Cross section (b)')
plt.xlim((xs.x[0], xs.x[-1]))
"""
Explanation: The (n,2n) cross section, like all basic cross sections, is represented by the Tabulated1D class. The energy and cross section values in the table can be directly accessed with the x and y attributes. Using x and y has the nice benefit of automatically accounting for reaction thresholds.
End of explanation
"""
n2n.products
neutron = n2n.products[0]
neutron.distribution
"""
Explanation: To get information on the energy and angle distribution of the neutrons emitted in the reaction, we need to look at the products attribute.
End of explanation
"""
dist = neutron.distribution[0]
dist.energy_out
"""
Explanation: We see that the neutrons emitted have a correlated angle-energy distribution. Let's look at the energy_out attribute to see what the outgoing energy distributions are.
End of explanation
"""
for e_in, e_out_dist in zip(dist.energy[::5], dist.energy_out[::5]):
plt.semilogy(e_out_dist.x, e_out_dist.p, label='E={:.2f} MeV'.format(e_in/1e6))
plt.ylim(top=1e-6)
plt.legend()
plt.xlabel('Outgoing energy (eV)')
plt.ylabel('Probability/eV')
plt.show()
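# Hedged aside (not in the original notebook): each tabulated distribution above
# exposes points (x) and densities (p), so its mean outgoing energy can be
# estimated with a simple trapezoidal integration.
e_out = dist.energy_out[-1]   # distribution for the highest incoming energy
norm = np.trapz(e_out.p, e_out.x)
mean_e_out = np.trapz(e_out.x * e_out.p, e_out.x) / norm
print('Mean outgoing energy ~ {:.3e} eV'.format(mean_e_out))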
"""
Explanation: Here we see we have a tabulated outgoing energy distribution for each incoming energy. Note that the same probability distribution classes that we could use to create a source definition are also used within the openmc.data package. Let's plot every fifth distribution to get an idea of what they look like.
End of explanation
"""
fig = plt.figure()
ax = fig.add_subplot(111)
cm = matplotlib.cm.Spectral_r
# Determine size of probability tables
urr = gd157.urr['294K']
n_energy = urr.table.shape[0]
n_band = urr.table.shape[2]
for i in range(n_energy):
# Get bounds on energy
if i > 0:
e_left = urr.energy[i] - 0.5*(urr.energy[i] - urr.energy[i-1])
else:
e_left = urr.energy[i] - 0.5*(urr.energy[i+1] - urr.energy[i])
if i < n_energy - 1:
e_right = urr.energy[i] + 0.5*(urr.energy[i+1] - urr.energy[i])
else:
e_right = urr.energy[i] + 0.5*(urr.energy[i] - urr.energy[i-1])
for j in range(n_band):
# Determine maximum probability for a single band
max_prob = np.diff(urr.table[i,0,:]).max()
# Determine bottom of band
if j > 0:
xs_bottom = urr.table[i,1,j] - 0.5*(urr.table[i,1,j] - urr.table[i,1,j-1])
value = (urr.table[i,0,j] - urr.table[i,0,j-1])/max_prob
else:
xs_bottom = urr.table[i,1,j] - 0.5*(urr.table[i,1,j+1] - urr.table[i,1,j])
value = urr.table[i,0,j]/max_prob
# Determine top of band
if j < n_band - 1:
xs_top = urr.table[i,1,j] + 0.5*(urr.table[i,1,j+1] - urr.table[i,1,j])
else:
xs_top = urr.table[i,1,j] + 0.5*(urr.table[i,1,j] - urr.table[i,1,j-1])
# Draw rectangle with appropriate color
ax.add_patch(Rectangle((e_left, xs_bottom), e_right - e_left, xs_top - xs_bottom,
color=cm(value)))
# Overlay total cross section
ax.plot(gd157.energy['294K'], total.xs['294K'](gd157.energy['294K']), 'k')
# Make plot pretty and labeled
ax.set_xlim(1.0, 1.0e5)
ax.set_ylim(1e-1, 1e4)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('Energy (eV)')
ax.set_ylabel('Cross section (b)')
"""
Explanation: Unresolved resonance probability tables
We can also look at unresolved resonance probability tables which are stored in a ProbabilityTables object. In the following example, we'll create a plot showing what the total cross section probability tables look like as a function of incoming energy.
End of explanation
"""
gd157.export_to_hdf5('gd157.h5', 'w')
"""
Explanation: Exporting HDF5 data
If you have an IncidentNeutron instance that was created from ACE or HDF5 data, you can easily write it to disk using the export_to_hdf5() method. This can be used to convert ACE to HDF5 or to take an existing data set and actually modify its cross sections.
End of explanation
"""
gd157_reconstructed = openmc.data.IncidentNeutron.from_hdf5('gd157.h5')
np.all(gd157[16].xs['294K'].y == gd157_reconstructed[16].xs['294K'].y)
"""
Explanation: With few exceptions, the HDF5 file encodes the same data as the ACE file.
End of explanation
"""
h5file = h5py.File('gd157.h5', 'r')
main_group = h5file['Gd157/reactions']
for name, obj in sorted(list(main_group.items()))[:10]:
if 'reaction_' in name:
print('{}, {}'.format(name, obj.attrs['label'].decode()))
n2n_group = main_group['reaction_016']
pprint(list(n2n_group.values()))
"""
Explanation: And one of the best parts of using HDF5 is that it is a widely used format with lots of third-party support. You can use h5py, for example, to inspect the data.
End of explanation
"""
n2n_group['294K/xs'][()]
"""
Explanation: So we see that the hierarchy of data within the HDF5 mirrors the hierarchy of Python objects that we manipulated before.
End of explanation
"""
# Download ENDF file
url = 'https://t2.lanl.gov/nis/data/data/ENDFB-VII.1-neutron/Gd/157'
filename, headers = urllib.request.urlretrieve(url, 'gd157.endf')
# Load into memory
gd157_endf = openmc.data.IncidentNeutron.from_endf(filename)
gd157_endf
"""
Explanation: Working with ENDF files
In addition to being able to load ACE and HDF5 data, we can also load ENDF data directly into an IncidentNeutron instance using the from_endf() factory method. Let's download the ENDF/B-VII.1 evaluation for $^{157}$Gd and load it in:
End of explanation
"""
elastic = gd157_endf[2]
"""
Explanation: Just as before, we can get a reaction by indexing the object directly:
End of explanation
"""
elastic.xs
"""
Explanation: However, if we look at the cross section now, we see that it isn't represented as tabulated data anymore.
End of explanation
"""
elastic.xs['0K'](0.0253)
"""
Explanation: If you had Cython installed when you built/installed OpenMC, you should be able to evaluate resonant cross sections from ENDF data directly, i.e., OpenMC will reconstruct resonances behind the scenes for you.
End of explanation
"""
gd157_endf.resonances.ranges
"""
Explanation: When data is loaded from an ENDF file, there is also a special resonances attribute that contains resolved and unresolved resonance region data (from MF=2 in an ENDF file).
End of explanation
"""
[(r.energy_min, r.energy_max) for r in gd157_endf.resonances.ranges]
"""
Explanation: We see that $^{157}$Gd has a resolved resonance region represented in the Reich-Moore format as well as an unresolved resonance region. We can look at the min/max energy of each region by doing the following:
End of explanation
"""
# Create log-spaced array of energies
resolved = gd157_endf.resonances.resolved
energies = np.logspace(np.log10(resolved.energy_min),
np.log10(resolved.energy_max), 1000)
# Evaluate elastic scattering xs at energies
xs = elastic.xs['0K'](energies)
# Plot cross section vs energies
plt.loglog(energies, xs)
plt.xlabel('Energy (eV)')
plt.ylabel('Cross section (b)')
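# Hedged aside (not from the original notebook): argmax on the evaluated grid
# gives a quick (grid-limited) estimate of where the largest resonance peak sits.
print('Largest value {:.3e} b near {:.3e} eV'.format(xs.max(), energies[np.argmax(xs)]))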
"""
Explanation: With knowledge of the energy bounds, let's create an array of energies over the entire resolved resonance range and plot the elastic scattering cross section.
End of explanation
"""
resolved.parameters.head(10)
"""
Explanation: Resonance ranges also have a useful parameters attribute that shows the energies and widths for resonances.
End of explanation
"""
gd157.add_elastic_0K_from_endf('gd157.endf')
"""
Explanation: Heavy-nuclide resonance scattering
OpenMC has two methods for accounting for resonance upscattering in heavy nuclides, DBRC and RVS. These methods rely on 0 K elastic scattering data being present. If you have an existing ACE/HDF5 dataset and you need to add 0 K elastic scattering data to it, this can be done using the IncidentNeutron.add_elastic_0K_from_endf() method. Let's do this with our original gd157 object that we instantiated from an ACE file.
End of explanation
"""
gd157[2].xs
"""
Explanation: Let's check to make sure that we have both the room temperature elastic scattering cross section as well as a 0K cross section.
End of explanation
"""
# Download ENDF file
url = 'https://t2.lanl.gov/nis/data/data/ENDFB-VII.1-neutron/H/2'
filename, headers = urllib.request.urlretrieve(url, 'h2.endf')
# Run NJOY to create deuterium data
h2 = openmc.data.IncidentNeutron.from_njoy('h2.endf', temperatures=[300., 400., 500.], stdout=True)
"""
Explanation: Generating data from NJOY
To run OpenMC in continuous-energy mode, you generally need to have ACE files already available that can be converted to OpenMC's native HDF5 format. If you don't already have suitable ACE files or need to generate new data, both the IncidentNeutron and ThermalScattering classes include from_njoy() methods that will run NJOY to generate ACE files and then read those files to create OpenMC class instances. The from_njoy() methods take as input the name of an ENDF file on disk. By default, it is assumed that you have an executable named njoy available on your path. This can be configured with the optional njoy_exec argument. Additionally, if you want to show the progress of NJOY as it is running, you can pass stdout=True.
Let's use IncidentNeutron.from_njoy() to run NJOY to create data for $^2$H using an ENDF file. We'll specify that we want data specifically at 300, 400, and 500 K.
End of explanation
"""
h2[2].xs
"""
Explanation: Now we can use our h2 object just as we did before.
End of explanation
"""
url = 'https://github.com/mit-crpg/WMP_Library/releases/download/v1.1/092238.h5'
filename, headers = urllib.request.urlretrieve(url, '092238.h5')
u238_multipole = openmc.data.WindowedMultipole.from_hdf5('092238.h5')
"""
Explanation: Note that 0 K elastic scattering data is automatically added when using from_njoy() so that resonance elastic scattering treatments can be used.
Windowed multipole
OpenMC can also be used with an experimental format called windowed multipole. Windowed multipole allows for analytic on-the-fly Doppler broadening of the resolved resonance range. Windowed multipole data can be downloaded with the openmc-get-multipole-data script. This data can be used in the transport solver, but it can also be used directly in the Python API.
End of explanation
"""
u238_multipole(1.0, 294)
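# Hedged sketch (not in the original notebook): unpacking the returned triple by
# name keeps the order (elastic scattering, radiative capture, fission) explicit.
elastic_xs, capture_xs, fission_xs = u238_multipole(1.0, 294)
print(elastic_xs, capture_xs, fission_xs)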
"""
Explanation: The WindowedMultipole object can be called with energy and temperature values. Calling the object gives a tuple of 3 cross sections: elastic scattering, radiative capture, and fission.
End of explanation
"""
E = np.linspace(5, 25, 1000)
plt.semilogy(E, u238_multipole(E, 293.606)[1])
"""
Explanation: An array can be passed for the energy argument.
End of explanation
"""
E = np.linspace(6.1, 7.1, 1000)
plt.semilogy(E, u238_multipole(E, 0)[1])
plt.semilogy(E, u238_multipole(E, 900)[1])
"""
Explanation: The real advantage to multipole is that it can be used to generate cross sections at any temperature. For example, this plot shows the Doppler broadening of the 6.67 eV resonance between 0 K and 900 K.
End of explanation
"""
|
jalabort/templatetracker | notebooks/CF Tracker.ipynb | bsd-3-clause | video_path = '../data/video.mp4'
cam = cv2.VideoCapture(video_path)
print 'Is video capture opened?', cam.isOpened()
n_frames = 500
resolution = (640, 360)
frames = []
for _ in range(n_frames):
# read frame
frame = cam.read()[1]
# scale down
frame = cv2.resize(frame, resolution)
# bgr to rgb
frame = frame[..., ::-1]
# pixel values from 0 to 1
frame = np.require(frame, dtype=np.double)
frame /= 255
# roll channel axis to the front
frame = np.rollaxis(frame, -1)
    # build menpo image
frame = Image(frame)
# append to frame list
frames.append(frame)
cam.release()
visualize_images(frames)
"""
Explanation: Correlation Filter (CF) based Tracker
This tracker is a first, initial implementation of the ideas described in the following 3 papers regarding template tracking using adaptive correlation filters:
David S. Bolme, J. Ross Beveridge, Bruce A. Draper and Yui Man Lui. "Visual Object Tracking using Adaptive Correlation Filters". CVPR, 2010
Hamed Kiani Galoogahi, Terence Sim, Simon Lucey. "Multi-Channel Correlation Filters". ICCV, 2013.
J. F. Henriques, R. Caseiro, P. Martins, J. Batista. "High-Speed Tracking with Kernelized Correlation Filters". TPAMI, 2015.
Load and manipulate basket ball video
Read, pre-process and store a particular number of frames of the provided basket ball video.
End of explanation
"""
# first frame
frame0 = frames[0]
# manually define target centre
target_centre0 = PointCloud(np.array([168.0, 232.0])[None])
# manually define target size
target_shape = (31.0, 31.0)
# build bounding box containing the target
target_bb = generate_bounding_box(target_centre0, target_shape)
# add target centre and bounding box as frame landmarks
frame0.landmarks['target_centre'] = target_centre0
frame0.landmarks['target_bb'] = target_bb
# visualize initialization
frame0.view_widget()
"""
Explanation: Define the position and size of the target on the first frame. Note that we need to do this manually!
End of explanation
"""
# set options
# specify the kind of filters to be learned and incremented
learn_filter = learn_mccf # learn_mosse or learn_mccf
increment_filter = increment_mccf # increment_mosse or increment_mccf; should match with the previous learn filter!
# specify image representation used for tracking
features = no_op # no_op, greyscale, greyscale_hog
tracker = CFTracker(frame0, target_centre0, target_shape, learn_filter=learn_filter,
increment_filter=increment_filter, features=features)
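# Hedged, self-contained sketch (NOT the repo's learn_mosse implementation): the
# MOSSE filter of Bolme et al. has a closed form in the frequency domain,
#   conj(H) = sum_i(G * conj(F_i)) / (sum_i(F_i * conj(F_i)) + lam),
# where F_i are FFTs of training patches and G is the FFT of a desired
# (typically Gaussian) response. All names below are local to this sketch.
import numpy as np
def mosse_filter_sketch(patches, desired_response, lam=1e-2):
    G = np.fft.fft2(desired_response)
    num, den = 0, lam
    for patch in patches:
        F = np.fft.fft2(patch)
        num = num + G * np.conj(F)
        den = den + F * np.conj(F)
    H_conj = num / den
    # spatial-domain filter, e.g. for visual inspection like the plots above
    return np.real(np.fft.ifft2(np.conj(H_conj)))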
"""
Explanation: Track basket ball video
Create and initialize the correlation filter based tracker by giving it the first frame and the target position and size on the first frame.
End of explanation
"""
# only up to the first 5 channels are shown
n_channels = np.minimum(5, tracker.filter.shape[0])
fig_size = (3*n_channels, 3*n_channels)
fig = plt.figure()
fig.set_size_inches(fig_size)
for j, c in enumerate(tracker.filter[:n_channels]):
plt.subplot(1, n_channels, j+1)
plt.title('CF in spatial domain')
plt.imshow(tracker.filter[j])
fig = plt.figure()
fig.set_size_inches(fig_size)
for j, c in enumerate(tracker.filter[:n_channels]):
plt.subplot(1, n_channels, j+1)
plt.title('CF in frequency domain')
plt.imshow(np.abs(fftshift(fft2(tracker.filter[j]))))
"""
Explanation: Visualize the learned correlation filters.
End of explanation
"""
# set options
# filter adaptive parameter; values close to 1 give more weight to filters derived from the last tracked frames,
# values close to 0 give more weight to the initial filter
nu = 0.125
# specifies a threshold on the peak to sidelobe measure below which there is too much uncertainty wrt the target
# position and consequently filters are not updated based on the current frame
psr_threshold = 5
# specifies how the next target position is obtained given the filter response
compute_peak = compute_max_peak # compute_max_peak or compute_meanshift_peak
target_centre = target_centre0
filters = []
targets = []
psrs = []
rs = []
for j, frame in enumerate(frames):
# track target
target_centre, psr, r = tracker.track(frame, target_centre, nu=nu,
psr_threshold=psr_threshold,
compute_peak=compute_peak)
# add target centre and its bounding box as landmarks
frame.landmarks['tracked_centre'] = target_centre
frame.landmarks['tracked_bb'] = generate_bounding_box(target_centre, target_shape)
# add psr to list
psrs.append(psr)
rs.append(r)
# print j
"""
Explanation: Track the previous frames.
End of explanation
"""
visualize_images(frames)
"""
Explanation: Explore tracked frames.
End of explanation
"""
plt.title('Peak to sidelobe ratio (PSR)')
plt.plot(range(len(psrs)), psrs)
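# Hedged aside (NOT the repo's own PSR computation): Bolme et al. define the
# peak-to-sidelobe ratio of a 2D correlation response r as
#   PSR = (max(r) - mean(sidelobe)) / std(sidelobe),
# where the sidelobe excludes a small window around the peak. The 11x11 window
# below is an assumed size for illustration only.
def psr_sketch(response, exclude=5):
    peak = np.unravel_index(np.argmax(response), response.shape)
    mask = np.ones(response.shape, dtype=bool)
    mask[max(peak[0] - exclude, 0):peak[0] + exclude + 1,
         max(peak[1] - exclude, 0):peak[1] + exclude + 1] = False
    sidelobe = response[mask]
    return (response.max() - sidelobe.mean()) / sidelobe.std()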
"""
Explanation: Show peak to sidelobe ratio (PSR) over the entire sequence.
End of explanation
"""
|
pfschus/fission_bicorrelation | methods/detector_pair_angles.ipynb | mit | %%javascript
$.getScript('https://kmahelona.github.io/ipython_notebook_goodies/ipython_notebook_toc.js')
"""
Explanation: <h1 id="tocheading">Table of Contents</h1>
<div id="toc"></div>
End of explanation
"""
# Import packages
import os.path
import time
import numpy as np
np.set_printoptions(threshold=np.nan) # print entire matrices
import sys
import inspect
import matplotlib.pyplot as plt
import scipy.io as sio
from tqdm import *
import pandas as pd
import seaborn as sns
sns.set_palette('hls')
sns.set_style(style='white')
sys.path.append('../scripts/')
import bicorr as bicorr
import bicorr_plot as bicorr_plot
%load_ext autoreload
%autoreload 2
# Load det_df
det_df = bicorr.load_det_df(plot_flag=True)
"""
Explanation: How to bin detector pair angles?
Author: Patricia Schuster
Date: Summer 2017
Institution: UM NERS
Email: [email protected]
What is the best way to bin detector pair angles? Build some tools.
End of explanation
"""
det_df.head()
plt.scatter(det_df.index,det_df['angle'].values,color='k')
plt.xlabel('DataFrame index')
plt.ylabel('Angle (degrees)')
plt.title('Detector pair angles (degrees)')
plt.xlim([-5,1000])
plt.ylim([0,185])
plt.grid()
plt.savefig('../fig/angle_vs_pair.png')
plt.show()
"""
Explanation: Look at the distribution of angles
End of explanation
"""
plt.hist(det_df['angle'],20,color='gray')
plt.xlabel('Angle between detector pairs (degrees)')
plt.ylabel('Number of detector pairs')
plt.title('Distribution of detector pair angles')
bicorr_plot.save_fig_to_folder(fig_filename='hist_angles',fig_folder='../fig')
plt.show()
"""
Explanation: Histogram with equal bin width
End of explanation
"""
bin_edges = np.array([14.0,20.0,30.0,40.0,50.0,59.0,61.0,65.0,75.0,85.0,95.0, 105.0,115.0,125.0, 133.0,139.0,145.0,149.0,153.0,160.0,170.0,180.0])
bin_centers = (bin_edges[1:] + bin_edges[:-1])/2
hist = np.histogram(det_df['angle'].values, bins = bin_edges)[0]
plt.plot(bin_centers, hist,'.k')
plt.xlabel('Bin center, detector pair angle (degrees)')
plt.ylabel('Number of pairs')
plt.title('Non-uniform bin widths')
plt.show()
"""
Explanation: Histogram with non-uniform bin widths
End of explanation
"""
[unique, unique_counts] = np.unique(det_df['angle'].values,return_counts=True)
print(unique.shape)
plt.plot(unique,unique_counts,'.k')
plt.xlabel('Detector pair angle')
plt.ylabel('Number of pairs')
plt.title('Unique detector pair angles')
plt.savefig('../fig/angles_unique.png')
plt.show()
"""
Explanation: Unique angles, investigate rounding effects
How many unique angles are there?
End of explanation
"""
[unique, unique_counts] = np.unique(np.round(det_df['angle'].values,1),return_counts=True)
print(unique.shape)
plt.plot(unique,unique_counts,'.k')
plt.xlabel('Detector pair angle')
plt.ylabel('Number of pairs')
plt.title('Unique detector pair angles')
plt.show()
"""
Explanation: Round to the nearest .1 degrees. This collapses 6 bins... not very many.
End of explanation
"""
[unique, unique_counts] = np.unique(np.round(det_df['angle'].values,0),return_counts=True)
print(unique.shape)
plt.plot(unique,unique_counts,'.k')
plt.xlabel('Detector pair angle')
plt.ylabel('Number of pairs')
plt.title('Unique detector pair angles')
plt.savefig('../fig/angles_unique_round.png')
plt.show()
"""
Explanation: Try rounding it to the nearest degree. This collapses the data a lot to only 90 unique angles.
End of explanation
"""
angle_hist, angle_bin_edges = np.histogram(det_df['angle'].values,20)
print(angle_bin_edges)
for pair in det_df.index:
det_df.loc[pair,'bin'] = np.digitize(det_df.loc[pair,'angle'],angle_bin_edges)
det_df.head()
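# Hedged mini-example (not part of the original analysis): np.digitize returns,
# for each value, the index of the bin it falls into given the bin edges.
toy_edges = np.array([0., 30., 60., 90.])
toy_angles = np.array([10., 45., 89.])
print(np.digitize(toy_angles, toy_edges))   # expected: [1 2 3]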
"""
Explanation: Fill 'bin' column in DataFrame
Into which bin in the histogram does each detector pair fall?
Make use of the numpy function np.digitize, https://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.digitize.html.
End of explanation
"""
i_bin = 19
det_df[det_df['bin']==i_bin]
pair_is = det_df[det_df['bin']==i_bin].index.values
print(len(pair_is), 'pairs bin', i_bin, 'at indices', pair_is)
det_df.loc[pair_is]
mean_angle = np.mean(det_df.loc[pair_is]['angle'].values)
print(mean_angle)
[unique_bin, unique_counts_bin] = np.unique(det_df.loc[pair_is]['angle'],return_counts=True)
print(unique_bin)
print(unique_counts_bin)
"""
Explanation: Select events within a given angle range
I will write a function generate_pair_is which will take a bin number or an angle range. If a bin number, then the bin column in det_df must already be filled.
By bin number
Store the indices of pairs at a given angle range as pair_is.
End of explanation
"""
th_min = 20.0
th_max = 25.0
ind_mask = (det_df['angle'] > th_min) & (det_df['angle'] <= th_max) # Includes upper bin edge
ind = det_df.index[ind_mask].values
print(ind)
"""
Explanation: By angle range
Provide th_min and th_max. Write the function bicorr.generate_pair_is_th_range. Quite a lengthy function name but we'll go with it.
End of explanation
"""
bicorr.generate_pair_is(det_df,th_min = 0,th_max=20)
det_df.loc[bicorr.generate_pair_is(det_df,i_bin = 18)]
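# Hedged sketch of the selection logic described above; this is NOT the actual
# bicorr.generate_pair_is implementation, just an illustration of the idea.
def generate_pair_is_sketch(det_df, i_bin=None, th_min=None, th_max=None):
    if i_bin is not None:
        mask = det_df['bin'] == i_bin
    else:
        # angle range selection, including the upper bin edge as above
        mask = (det_df['angle'] > th_min) & (det_df['angle'] <= th_max)
    return det_df.index[mask].values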
"""
Explanation: Functionalize this in a general method
End of explanation
"""
# Load det_df
det_df = bicorr.load_det_df()
det_df.head()
angle_hist, angle_bin_edges = plt.hist(det_df['angle'].values,bins=np.arange(0,181,30))[0:2]
plt.xlabel('Angle (degrees)')
plt.ylabel('Number of pairs')
plt.show()
angle_bin_edges
"""
Explanation: Loop through each angle bin
Restart the kernel, load det_df with empty bin column, and begin.
Specify angle bin edges
End of explanation
"""
for pair in det_df.index:
det_df.loc[pair,'bin'] = np.digitize(det_df.loc[pair,'angle'],angle_bin_edges,right=True)
det_df.head()
"""
Explanation: Fill bin column
End of explanation
"""
count = 0
for i_bin in np.arange(1,len(angle_bin_edges)):
print('Bin ', i_bin)
pair_is = bicorr.generate_pair_is(det_df, i_bin = i_bin)
count += len(pair_is)
plt.scatter(det_df.loc[pair_is]['d1'],det_df.loc[pair_is]['d2'],c=list(det_df.loc[pair_is]['angle']),s=20,marker='s',edgecolor='none',cmap='jet')
plt.colorbar()
plt.xlim([0,50])
plt.ylim([0,50])
plt.xlabel('Detector 1')
plt.ylabel('Detector 2')
plt.title('Detector pairs between {} and {} degrees'.format(angle_bin_edges[i_bin-1],angle_bin_edges[i_bin]))
plt.show()
print(count)
"""
Explanation: Loop through each bin and generate, plot pair_is
End of explanation
"""
|
ueapy/ueapy.github.io | content/notebooks/2016-01-22-string-formatting.ipynb | mit | header_text = '{!-*- F90 -*-}'
with open('header.inp','w') as header_file:
header_file.write(header_text)
"""
Explanation: Sometimes in order to run some modelling software we need to prepare an input description file which specifies the model setup (e.g., chemical mechanism, integration method, desired type of results, etc.). If we are planning to run the model several times with, for example, different initial conditions, constructing such a file using a script could be beneficial. During our last meeting we discussed how to assemble such a file with Python, and here is what we did.
Let's assume that we need to construct a file containing the following information:
```
{!-*- F90 -*-}
#INITVALUES
O3 =7.50e+11;
CH4 =4.55e+13;
CO =2.55e+12;
#INLINE F90_INIT
TSTART = 0*3600
TEND = TSTART + 7*3600
DT = 3600
TEMP = 298
#ENDINLINE
```
The first line of this file is a header. Since it is not going to change in our model runs, we can store it in a separate file:
End of explanation
"""
def gen_concs_string(O3=7.50e+11, CH4=4.55e+13, CO=2.55e+12):
concs_string = \
"""
#INITVALUES
O3\t={O3_nd:.2e};
CH4\t={CH4_nd:.2e};
CO\t={CO_nd:.2e};"""\
.format(O3_nd=O3, CH4_nd=CH4, CO_nd=CO)
return concs_string
"""
Explanation: The next four lines define the #INITVALUES section of the file, where the initial concentrations (actually, number densities) of chemical compounds of interest are set. If we want to change only numerical values in this section, it is logical to create a text template which would take into account the syntax of the target sortware and include some sort of 'gaps' to fill in with our initial values. One way of achieving that is to define a function that creates a multline string and has a number of arguments determining the initial concentrations of our chemical species:
End of explanation
"""
def gen_time_str(tstart, nhours, timestep, temp):
time_string = \
"""
#INLINE F90_INIT
TSTART = {tstart}*{timestep}
TEND = TSTART + {nhours}*3600
DT = {timestep}
TEMP = {temp}
#ENDINLINE"""\
.format(tstart=tstart, nhours=nhours, timestep=timestep, temp=temp)
return time_string
"""
Explanation: For convenience we can even set default values for each of the arguments (e.g., here the default ozone initial concentration is $7.5\times 10^{11}$ molecules per $cm^{3}$).
By the way, we have just used the new style of string formatting in Python! An old way of doing the same would include a '%' sign in front of the function arguments inside the string expression and a line of code starting with '%' afterwards, like this
```
"""
INITVALUES
O3\t=%(O3_nd).2e;
CH4\t=%(CH4_nd).2e;
CO\t=%(CO_nd).2e;"""\
%{"O3_nd": O3, "CH4_nd": CH4, "CO_nd": CO}
```
but using a new style makes your code more readable. For more examples on differences between old and new styles of string formatting in Python follow this link: PyFormat.
Well, let's reinforce our knowledge and apply a new style of string formatting to reproduce the last section of the input file, which specifies model integration time and temperature:
End of explanation
"""
# Read header
with open('header.inp','r') as header_file:
header_text = header_file.read()
# Use default initial concentrations and set model integration time and temperature
concstr = gen_concs_string()
timestr = gen_time_str(0,7,3600,298)
# Combine all strings together
full_str = header_text + concstr + timestr
# Create function that writes a string to file
def write_str_to_file(string, filename='model.def'):
with open(filename,'w') as def_file:
def_file.write(string)
# Call this function with your string
write_str_to_file(full_str)
"""
Explanation: And finally let's assemble our input description file:
End of explanation
"""
file_mask = '+{one}hours_{two}UTC.jpg'
for i, j in zip((1,2,3), (4,5,6)):
print(file_mask.format(one=i, two=j))
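# Hedged extra example (not from the original post): format specs can zero-pad
# numbers, which keeps generated file names fixed-width and sortable.
padded_mask = 'frame_{idx:03d}_{hour:02d}UTC.jpg'
for idx, hour in zip((1, 2, 3), (4, 5, 6)):
    print(padded_mask.format(idx=idx, hour=hour))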
"""
Explanation: Creating a file mask
There are plenty of other ways to use a new string formatting in Python to your advantage. For example, you could use it to create file names in a loop:
End of explanation
"""
|
madsenmj/ml-introduction-course | Class09/Class09.ipynb | apache-2.0 | import pandas as pd
import matplotlib.pyplot as plt
df1=pd.read_csv('Class09_cluster_example1.csv',index_col=0)
df1.head()
"""
Explanation: Class 09
ML Models: Clustering
We continue working with unsupervised learning in this class. This time we are interested in separating out groups of data, or clusters. This can be useful if we are looking for patterns in our data: one pattern could be that the data clumps together around certain points. However, before we can check whether there are clusters, we have to decide how many cluster centers to look for. Fortunately the K-means clustering algorithm runs very quickly, so we should be able to try a variety of cluster numbers fairly quickly.
Demo
Before we dive into working with our own data, there is an excellent visualization tool that shows how this algorithm works. We will explore this together as a class before we move on to the next step.
Sample Data
I am going to follow a tutorial that does a good job of describing how k-means clustering works. The data are based on measurements of truck drivers. There are two features: the mean percentage of time a driver was >5 mph over the speed limit and the mean distance driven per day.
We'll use a scikit tool to try the unsupervised clustering on the data. Naturally we'll start by loading the sample data and plotting it to see what we've got.
End of explanation
"""
df1.plot.scatter(x='Distance_Feature',y='Speeding_Feature',marker='.')
x_min, x_max = df1['Distance_Feature'].min() - 1, df1['Distance_Feature'].max() + 1
y_min, y_max = df1['Speeding_Feature'].min() - 1, df1['Speeding_Feature'].max() + 1
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
"""
Explanation: Not knowing exactly what we're working with, let's get the minima and maxima of the two features. We'll use this later to create plots of our predictions.
End of explanation
"""
from sklearn.cluster import KMeans
kmeans2 = KMeans(n_clusters=2).fit(df1)
"""
Explanation: There are a couple of ways we could try splitting up this data. Like we saw in the demonstration, we have to choose the number of clusters ($k$) before we start. We'll try a couple of values and then look at them to see how they map against the data. There isn't any point in splitting the data into training/testing subsets because we don't have a label to train on. So we'll fit all the data.
End of explanation
"""
import numpy as np
# Step size of the mesh. Decrease to increase the quality of the plot.
h = 0.5 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Create the mesh for plotting the decision boundary
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans2.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
# Get the centroid (or center position) of each region so we can plot that, too.
centroids = kmeans2.cluster_centers_
# First plot the mesh that has the predictions for each point on the mesh.
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
# Now put the points on top
plt.plot(df1['Distance_Feature'], df1['Speeding_Feature'], 'k.', markersize=2)
# Plot the centroids as a white X
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means with 2 Clusters')
# And fix the plot limits and labels.
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.ylabel('Speeding_Feature')
plt.xlabel('Distance_Feature')
"""
Explanation: We will make a visualization like we've done with the classification algorithms: we want to map out which regions will be predicted to be which class. That will take a bit of work.
End of explanation
"""
kmeans3 = KMeans(n_clusters=3).fit(df1)
Z = kmeans3.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
centroids = kmeans3.cluster_centers_
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(df1['Distance_Feature'], df1['Speeding_Feature'], 'k.', markersize=2)
# Plot the centroids as a white X
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means with 3 Clusters')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.ylabel('Speeding_Feature')
plt.xlabel('Distance_Feature')
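# Hedged illustration (not in the original class notes) of the point made in the
# notes below: a brand-new observation can be classified with the fitted
# 2-cluster model. The feature values are arbitrary example numbers.
new_driver = [[180.0, 20.0]]   # [Distance_Feature, Speeding_Feature]
print(kmeans2.predict(new_driver))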
"""
Explanation: So this looks pretty good. There is a nice clear boundary between the two halves of the plot. The centroids (marked as white "X"s on the plot) look about right, too. I'm happy with this clustering.
If we get a new point in the future, we can easily classify it as belonging to one of these two groups. For example, we could now create a feature based on this grouping and then use that for other machine learning.
Cluster-data mismatches
Now let's see what happens if we pick 3 clusters from the start.
End of explanation
"""
kmeans4 = KMeans(n_clusters=4).fit(df1)
Z = kmeans4.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
centroids = kmeans4.cluster_centers_
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(df1['Distance_Feature'], df1['Speeding_Feature'], 'k.', markersize=2)
# Plot the centroids as a white X
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means with 4 Clusters')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.ylabel('Speeding_Feature')
plt.xlabel('Distance_Feature')
"""
Explanation: The algorithm does the best it can with what we gave it: it found three clusters of data. But the decision boundaries do not match the data very well, so let's try 4 instead.
End of explanation
"""
# Create the new column based on the labels from the kmeans fit
df1['kmeansgroup'] = kmeans4.labels_
# We group the data by this column
groups = df1.groupby('kmeansgroup')
# Then plot it
trainfig, ax = plt.subplots()
ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
# The next step is to cycle through the groups (based on our categories) and plot each one on the same axis.
for name, group in groups:
ax.plot(group['Distance_Feature'], group['Speeding_Feature'], marker='o', linestyle='', ms=6, label=name)
ax.set_aspect(1)
ax.legend(bbox_to_anchor=(1.2,0.5))
ax.set_xlabel('Distance_Feature')
ax.set_ylabel('Speeding_Feature')
"""
Explanation: That looks better. There are at least three good groups but the algorithm picks out the upper right-hand corner as the fourth grouping. I think we can work with this.
Using k-means groups as features
What if we want to add the groups to the dataframe to use it for other machine learning algorithms? We'll add in the feature column then plot the data using this new feature.
End of explanation
"""
eqINdf = pd.read_csv('Class09_USGS_IN_data.csv',parse_dates=[0])
print(eqINdf.dtypes)
eqINdf.head()
"""
Explanation: We didn't change the names of the features- the clusters are named 0-3, but that's probably good enough for now.
Working with geographical clusters
We'll now move to working with real data. This data comes from the US Geological Survey (USGS). I searched for all of the magnitude 1.0 and greater earthquakes for the state of Indiana from the last 40 years. We'll do a little data exploration on this dataset to see what we've got.
End of explanation
"""
# I want to make the plots appear bigger. This is how to do that.
plt.rcParams['figure.figsize'] = (12,8)
plt.plot(eqINdf['time'],eqINdf['mag'])
plt.xlabel('time')
plt.ylabel('Magnitude')
"""
Explanation: Let's look first at the earthquake magnitude vs time. It looks like there are a bunch of other columns we aren't going to need.
End of explanation
"""
import seaborn as sns
eqINdf['maggroup'] = eqINdf['mag'].apply(np.round)
g= sns.FacetGrid(eqINdf, col="maggroup",sharey=False)
g = g.map(plt.hist, "time",bins=100)
g.set_xticklabels(rotation=45)
"""
Explanation: That's not really what I want. What I want is the frequency of earthquakes over time. How about plotting a histogram of the number of earthquakes of each magnitude over the time window. We'll need to create a feature where we round the magnitude first. Then we can use a seaborn function to plot the different magnitudes.
End of explanation
"""
markersize = 3**(eqINdf['mag'].values)
# Get the first date in the time series
date0 = eqINdf['time'][0]
# Subtract the first date from each date and convert it to the number of days
eqINdf['dayssincestart'] = eqINdf['time'].apply(lambda x: (x-date0).days)
colors = eqINdf['dayssincestart']
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import Polygon
# Draw the map based on the maximum and minimum positions of our data points
m = Basemap(llcrnrlon=eqINdf['longitude'].min()*1.01,llcrnrlat=eqINdf['latitude'].min()*0.99,urcrnrlon=eqINdf['longitude'].max()*0.99,urcrnrlat=eqINdf['latitude'].max()*1.01,
projection='merc')
# Add the state boundaries to the map
m.readshapefile('st99_d00', name='states', drawbounds=True)
# Prep the data for plotting on the map
x,y = m(eqINdf['longitude'].values, eqINdf['latitude'].values)
# Plot the data points on the map
m.scatter(x,y, s=markersize, c=colors, marker="o",cmap=plt.cm.rainbow,alpha=0.7)
# Now set up the color bar with ticks every 10 years
cbar = plt.colorbar(ticks=np.arange(0,colors.max(),3650))
cbar.ax.set_yticklabels(np.arange(1976,2017,10)) # vertically oriented colorbar
"""
Explanation: That looks more like what I want. The earthquakes look like they happen fairly regularly and mostly randomly.
Let's now look at the geographical data. I want to do a couple of things here. First, I want to plot the map so that I know where the earthquakes happen. We'll use the Basemap library to do that part.
I also want to plot the size of the points as proportional to the magnitude of the earthquake. The way I'm going to do this is to adjust the point size so that the s input to the scatter() function is equal to:
$size = 3^{magnitude}$.
That will make the plot look good.
Finally, I want to color the points chronologically so that, as we go through time, the colors change based on the colors of a rainbow. To do that, I'm going to need a days since start feature. Let's do both of those first.
End of explanation
"""
kmeansIN1 = KMeans(n_clusters=6,n_init=30,max_iter=1000).fit(eqINdf[['latitude','longitude']])
# Obtain labels for each point in mesh. Use last trained model.
h=0.05
xx, yy = np.meshgrid(np.arange(eqINdf['latitude'].min()*0.99, eqINdf['latitude'].max()*1.01, h), np.arange(eqINdf['longitude'].min()*1.01, eqINdf['longitude'].max()*0.99, h))
Z = kmeansIN1.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
# Get the centroid of each region
centroids = kmeansIN1.cluster_centers_
# Draw the map based on the maximum and minimum positions of our data points
m = Basemap(llcrnrlon=eqINdf['longitude'].min()*1.01,llcrnrlat=eqINdf['latitude'].min()*0.99,urcrnrlon=eqINdf['longitude'].max()*0.99,urcrnrlat=eqINdf['latitude'].max()*1.01,
projection='merc')
# Add the state boundaries to the map
m.readshapefile('st99_d00', name='states', drawbounds=True)
# Prep our cluster boundaries for plotting on the map
xb,yb = m(yy,xx)
# Plot the boundaries
m.pcolor(xb,yb,Z,cmap=plt.cm.Paired)
# Plot the points - all the same color, but still sizing them by magnitude
m.scatter(x,y, s=markersize, marker="o",alpha=0.2)
# Plot the centroids as a white X
xc,yc = m(centroids[:, 1], centroids[:, 0])
m.scatter(xc,yc,marker='x', s=169, linewidths=3,color='w', zorder=10)
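# Hedged follow-up sketch (not in the original notebook): a common way to pick
# the number of clusters is an "elbow" plot of the within-cluster sum of squares
# (the inertia_ attribute) against k.
inertias = [KMeans(n_clusters=k).fit(eqINdf[['latitude', 'longitude']]).inertia_
            for k in range(1, 11)]
plt.plot(range(1, 11), inertias, 'o-')
plt.xlabel('Number of clusters k')
plt.ylabel('Inertia (within-cluster sum of squares)')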
"""
Explanation: Now I want to try clustering the data. It looks like there may be 5-6 clusters here. We'll try six and see.
End of explanation
"""
|
goodwordalchemy/thinkstats_notes_and_exercises | code/chap03ex.ipynb | gpl-3.0 | %matplotlib inline
import thinkstats2
import thinkplot
import chap01soln
resp = chap01soln.ReadFemResp()
print len(resp)
"""
Explanation: Exercise from Think Stats, 2nd Edition (thinkstats2.com)<br>
Allen Downey
Read the female respondent file.
End of explanation
"""
numkdhh = thinkstats2.Pmf(resp.numkdhh)
numkdhh
"""
Explanation: Make a PMF of <tt>numkdhh</tt>, the number of children under 18 in the respondent's household.
End of explanation
"""
thinkplot.Hist(numkdhh, label='actual')
thinkplot.Config(title="PMF of num children under 18",
xlabel="number of children under 18",
ylabel="probability")
"""
Explanation: Display the PMF.
End of explanation
"""
def BiasPmf(pmf, label=''):
"""Returns the Pmf with oversampling proportional to value.
If pmf is the distribution of true values, the result is the
distribution that would be seen if values are oversampled in
proportion to their values; for example, if you ask students
how big their classes are, large classes are oversampled in
proportion to their size.
Args:
pmf: Pmf object.
label: string label for the new Pmf.
Returns:
Pmf object
"""
new_pmf = pmf.Copy(label=label)
for x, p in pmf.Items():
new_pmf.Mult(x, x)
new_pmf.Normalize()
return new_pmf
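# Hedged aside (not part of the original exercise): the mean of the biased
# distribution can also be computed directly from the unbiased PMF as
# E[x^2] / E[x], which gives a quick check on BiasPmf.
unbiased_mean = sum(x * p for x, p in numkdhh.Items())
second_moment = sum(x * x * p for x, p in numkdhh.Items())
print(second_moment / unbiased_mean)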
"""
Explanation: Define <tt>BiasPmf</tt>.
End of explanation
"""
biased_pmf = BiasPmf(numkdhh, label='biased')
thinkplot.Hist(biased_pmf)
thinkplot.Config(title="PMF of num children under 18",
xlabel="number of children under 18",
ylabel="probability")
"""
Explanation: Make the biased Pmf of children in the household, as observed if you surveyed the children instead of the respondents.
End of explanation
"""
width = 0.45
thinkplot.PrePlot(2)
thinkplot.Hist(biased_pmf, align="right", label="biased", width=width)
thinkplot.Hist(numkdhh, align="left", label="actual", width=width)
thinkplot.Config(title="PMFs of children under 18 in a household",
xlabel='number of children',
ylabel='probability')
"""
Explanation: Display the actual Pmf and the biased Pmf on the same axes.
End of explanation
"""
print "actual mean:", numkdhh.Mean()
print "biased mean:", biased_pmf.Mean()
"""
Explanation: Compute the means of the two Pmfs.
End of explanation
"""
|
sdpython/pyquickhelper | _unittests/ut_helpgen/data_gallery/notebooks/exams/interro_rapide_20_minutes_2014_12.ipynb | mit | from jyquickhelper import add_notebook_menu
add_notebook_menu()
"""
Explanation: Correction de l'interrogation écrite du 14 novembre 2014
End of explanation
"""
def make_squares(n):
squares = [i**2 for i in range(n)]
"""
Explanation: Enoncé 1
Q1
Le code suivant produit une erreur. Laquelle ?
End of explanation
"""
def make_squares(n):
squares = [i**2 for i in range(n)]
print ( make_squares(2) )
"""
Explanation: Comme il n'y a pas d'instruction return, la fonction retourne toujours None quelque chose le résultat de ce qu'elle calcule.
End of explanation
"""
s = 1
a = 0
for i in range(4):
a += s
s += 2
a
"""
Explanation: Q2
Que vaut a ?
End of explanation
"""
s = 1
a = 0
for i in range(4):
print(a,s)
a += s
s += 2
a
"""
Explanation: Si on affiche les résultats intermédiaires :
End of explanation
"""
d = {i:chr(i+97) for i in range(10)}
x = d[4]
x
"""
Explanation: Q3
On rappelle que ord('a')=97. Que vaut x ?
End of explanation
"""
notes = { "Alice": 17, "Bob": 18, "Jean−Ma": 17 }
notes['Claire'] = 18
def mystere(d):
a = 0
b = []
for k,v in d.items():
if v >= a:
a = v
b.append(k)
return (b,a)
print(mystere(notes))
notes
"""
Explanation: Il suffit de remplacer i par 4. x vaut chr(97+4) et on se déplace de 4 lettres dans l'alphabet, soit e.
Q4
Que fait le programme suivant ?
End of explanation
"""
notes = { "Alice": 17, "Bob": 18, "Jean−Ma": 17 }
notes['Claire'] = 18
def mystere(d):
a = 0
b = []
for k,v in d.items():
if v == a:
b.append(k)
elif v > a:
a = v
b = [ k ]
return (b,a)
print(mystere(notes))
"""
Explanation: Le programme commence par ajouter la clé Claire au dictionnaire. La variable a mémorise la valeur numérique la plus grande. En l'état, le résultat programme est assez imprévisible puisqu'il dépend de l'ordre dans lequel on parcourt les éléments. Je pense que la fonction devrait récupérer dans une liste l'ensemble des prénoms correspondant à cette valeur maximale s'il était écrit comme ceci :
End of explanation
"""
def f(n):
while n != 1:
if n%2 == 0:
n = n/2
else:
n = 3*n + 1
return n
f(3)
f(4)
"""
Explanation: Q5
Que renvoie la fonction suivante en fonction de n ?
End of explanation
"""
|
nikbearbrown/Deep_Learning | NEU/Guowei_Yang_DL/Tensorflow Tutorial_1_GY.ipynb | mit | import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix
"""
Explanation: TensorFlow Tutorial 01
Simple Linear Model
Introduction
This tutorial demonstrates the basic workflow of TensorFlow with a simple linear model.
End of explanation
"""
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets("data/MNIST/", one_hot=True)
"""
Explanation: This was developed using Python 3.6.4 (Anaconda) and TensorFlow.
Load Data
The MNIST data set is about 12 MB.
End of explanation
"""
print("Size of:")
print("- Training-set:\t\t{}".format(len(data.train.labels)))
print("- Test-set:\t\t{}".format(len(data.test.labels)))
print("- Validation-set:\t{}".format(len(data.validation.labels)))
"""
Explanation: The MNIST data set has now been loaded and it consists of 70,000 images and associated labels.
End of explanation
"""
data.test.labels[0:5,:]
"""
Explanation: One-Hot Encoding
The data set has been loaded with so-called One-Hot encoding. This means the labels have been converted from a single number to a vector whose length equals the number of possible classes. All elements of the vector are zero except for the i^th element, which is one and means the class is i. For example, the One-Hot encoded labels for the first 5 images in the test-set are:
End of explanation
"""
data.test.cls = np.array([label.argmax() for label in data.test.labels])
"""
Explanation: We also need the classes as single numbers for various comparisons and performance measures, so we convert the One-Hot encoded vectors to a single number by taking the index of the highest element. Note that the word 'class' is a keyword used in Python so we need to use the name 'cls' instead.
End of explanation
"""
data.test.cls[0:5]
"""
Explanation: We can now see the class for the first five images in the test-set. Compare these to the One-Hot encoded vectors above. For example, the class for the first image is 7, which corresponds to a One-Hot encoded vector where all elements are zero except for the element with index 7.
End of explanation
"""
# MNIST images are 28 pixels in each dimension.
img_size = 28
# Images are stored in one-dimensional arrays of this length.
img_size_flat = img_size * img_size
# Tuple with height and width of images used to reshape arrays.
img_shape = (img_size, img_size)
# Number of classes, one class for each of 10 digits.
num_classes = 10
"""
Explanation: Data Dimensions
The data dimensions are used in several places in source-code below. In computer programming, it is best to use variables and constants rather than having to hard-code specific numbers every time that number is used.
End of explanation
"""
def plot_images(images, cls_true, cls_pred=None):
assert len(images) == len(cls_true) == 9
# Create figure with 3x3 sub-plots.
fig, axes = plt.subplots(3, 3)
fig.subplots_adjust(hspace=0.3, wspace=0.3)
for i, ax in enumerate(axes.flat):
# Plot image.
ax.imshow(images[i].reshape(img_shape), cmap='binary')
# Show true and predicted classes.
if cls_pred is None:
xlabel = "True: {0}".format(cls_true[i])
else:
xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i])
ax.set_xlabel(xlabel)
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
"""
Explanation: Helper-function for plotting images
Function used to plot 9 images in a 3*3 grid, and writing the true and predicted classes below each image.
End of explanation
"""
# Get the first images from the test-set.
images = data.test.images[0:9]
# Get the true classes for those images.
cls_true = data.test.cls[0:9]
# Plot the images and labels using our helper-function above.
plot_images(images=images, cls_true=cls_true)
"""
Explanation: Plot a few images to see if data is correct
End of explanation
"""
x = tf.placeholder(tf.float32, [None, img_size_flat])
"""
Explanation: TensorFlow Graph
The entire purpose of TensorFlow is to have a so-called computational graph that can be executed much more efficiently than if the same calculations were to be performed directly in Python. TensorFlow can be more efficient than NumPy because TensorFlow knows the entire computation graph that must be executed, while NumPy only knows the computation of a single mathematical operation at a time.
TensorFlow can also automatically calculate the gradients that are needed to optimize the variables of the graph so as to make the model perform better. This is because the graph is a combination of simple mathematical expressions, so the gradient of the entire graph can be calculated using the chain rule for derivatives.
TensorFlow can also take advantage of multi-core CPUs as well as GPUs - and Google has even built special chips just for TensorFlow which are called TPUs (Tensor Processing Units) and are even faster than GPUs.
A TensorFlow graph consists of the following parts which will be detailed below:
Placeholder variables used to change the input to the graph
Model variables that are going to be optimized so as to make the model perform better
The model which is essentially just a mathematical function that calculates some output given the input in the placeholder variables and the model variables.
A cost measure that can be used to guide the optimization of the variables.
An optimization method which updates the variables of the model.
In addition, the TensorFlow graph may also contain various debugging statements, e.g. for logging data to be displayed using TensorBoard, which is not covered in this tutorial.
Placeholder variables
Placeholder variables serve as the input to the graph that we may change each time we execute the graph. We call this feeding the placeholder variables and it is demonstrated further below.
First we define the placeholder variable for the input images. This allows us to change the images that are input to the TensorFlow graph. This is a so-called tensor, which just means that it is a multi-dimensional vector or matrix. The data-type is set to float32 and the shape is set to [None, img_size_flat], where None means that the tensor may hold an arbitrary number of images, with each image being a vector of length img_size_flat.
End of explanation
"""
y_true = tf.placeholder(tf.float32, [None, num_classes])
"""
Explanation: Next we have the placeholder variable for the true labels associated with the images that were input in the placeholder variable x. The shape of this variable is [None, num_classes], which means it may hold an arbitrary number of labels and each label is a vector of length num_classes, which is 10 in this case.
End of explanation
"""
y_true_cls = tf.placeholder(tf.int64, [None])
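# The excerpt never defines the model variables used by the logits line a couple
# of cells below, so here is a minimal, assumed definition consistent with the
# shapes described in the notes (zeros initialisation is this sketch's choice,
# not necessarily the original author's).
weights = tf.Variable(tf.zeros([img_size_flat, num_classes]))
biases = tf.Variable(tf.zeros([num_classes]))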
"""
Explanation: Finally we have the placeholder variable for the true classes of each image in the placeholder variable x. These are integers and the dimensionality of this placeholder variable is set to [None], which means the placeholder variable is a one-dimensional vector of arbitrary length.
End of explanation
"""
logits = tf.matmul(x, weights) + biases
"""
Explanation: Model
This simple mathematical model multiplies the placeholder variable x with the weights and then adds the biases.
The result is a matrix of shape [num_images, num_classes] because x has shape [num_images, img_size_flat] and weights has shape [img_size_flat, num_classes], so the multiplication of those two matrices is a matrix of shape [num_images, num_classes], and then the biases vector is added to each row of the matrix.
Note that the name logits is typical TensorFlow terminology, but other people may call the variable something else.
End of explanation
"""
y_pred = tf.nn.softmax(logits)
"""
Explanation: Now logits is a matrix with num_images rows and num_classes columns, where the element of the ith row and jth column is an estimate of how likely the ith input image is to be of jth class.
However, these estimates are a bit rough and difficult to interpret because the numbers may be very small or large, so we want to normalize them so that each row of the logits matrix sums to one, and each element is limited between zero and one. This is calculated using the so-called softmax function and the result is stored in y_pred.
End of explanation
"""
y_pred_cls = tf.argmax(y_pred, dimension = 1)
"""
Explanation: The predicted class can be calculated from the y_pred matrix by taking the index of the largest element in each row.
End of explanation
"""
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                        labels=y_true)
"""
Explanation: Cost-function to be optimized
To make the model better at classifying the input images, we must somehow change the variables for weights and biases. To do this we first need to know how well the model currently performs by comparing the predicted output of the model y_pred to the desired output y_true.
The cross-entropy is a performance measure used in classification. The cross-entropy is a continuous function that is always positive, and if the predicted output of the model exactly matches the desired output then the cross-entropy equals zero. The goal of optimization is therefore to minimize the cross-entropy so it gets as close to zero as possible by changing the weights and biases of the model.
TensorFlow has a built-in function for calculating the cross-entropy. Note that it uses the values of the logits because it also calculates the softmax internally.
End of explanation
"""
cost = tf.reduce_mean(cross_entropy)
"""
Explanation: We have now calculated the cross-entropy for each of the image classifications, so we have a measure of how well the model performs on each image individually. But in order to use the cross-entropy to guide the optimization of the model's variables we need a single scalar value, so we simply take the average of the cross-entropy over all of the image classifications.
End of explanation
"""
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(cost)
"""
Explanation: Optimization method
Now that we have a cost measure that must be minimized, we can then create an optimizer. In this case it is the basic form of Gradient Descent where the step-size is set to 0.5.
Note that optimization is not performed at this point. In fact, nothing is calculated at all, we just add the optimizer-object to the TensorFlow graph for later execution.
End of explanation
"""
correct_prediction = tf.equal(y_pred_cls,y_true_cls)
"""
Explanation: Performance measures
We need a few more performance measures to display the progress to the user.
This is a vector of booleans indicating whether the predicted class equals the true class of each image.
End of explanation
"""
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
"""
Explanation: This calculates the classification accuracy by first type-casting the vector of booleans to floats, so that False becomes 0, and True becomes 1, and then calculating the average of these numbers.
End of explanation
"""
session = tf.Session()
"""
Explanation: TensorFlow Run
Create TensorFlow session
Once the TensorFlow graph has been created, we have to create a TensorFlow session which is used to execute the graph.
End of explanation
"""
session.run(tf.global_variables_initializer())
"""
Explanation: Initialize variables
The variables for weights and biases must be initialized before we start optimizing them.
End of explanation
"""
batch_size = 100
"""
Explanation: Helper-function to perform optimization iterations
There are 50,000 images in the training-set, so it takes a long time to calculate the gradient of the model using all of these images. We therefore use Stochastic Gradient Descent, which only uses a small batch of images in each iteration of the optimizer.
End of explanation
"""
def optimize(num_iterations):
for i in range(num_iterations):
x_batch, y_true_batch = data.train.next_batch(batch_size)
feed_dict_train = {x: x_batch,
y_true: y_true_batch}
session.run(optimizer, feed_dict = feed_dict_train)
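# Hedged usage sketch (not from the original notebook): run a few optimization
# iterations and then evaluate the accuracy tensor on the test set.
optimize(num_iterations=100)
feed_dict_test = {x: data.test.images,
                  y_true: data.test.labels,
                  y_true_cls: data.test.cls}
print('Test accuracy: {:.1%}'.format(session.run(accuracy, feed_dict=feed_dict_test)))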
"""
Explanation: Function for performing a number of optimization iterations so as to gradually improve the weights and biases of the model. In each iteration, a new batch of data is selected from the training-set and then TensorFlow executes the optimizer using those training samples.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/uhh/cmip6/models/sandbox-2/ocean.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'uhh', 'sandbox-2', 'ocean')
"""
Explanation: ES-DOC CMIP6 Model Properties - Ocean
MIP Era: CMIP6
Institute: UHH
Source ID: SANDBOX-2
Topic: Ocean
Sub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing.
Properties: 133 (101 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:41
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Seawater Properties
3. Key Properties --> Bathymetry
4. Key Properties --> Nonoceanic Waters
5. Key Properties --> Software Properties
6. Key Properties --> Resolution
7. Key Properties --> Tuning Applied
8. Key Properties --> Conservation
9. Grid
10. Grid --> Discretisation --> Vertical
11. Grid --> Discretisation --> Horizontal
12. Timestepping Framework
13. Timestepping Framework --> Tracers
14. Timestepping Framework --> Baroclinic Dynamics
15. Timestepping Framework --> Barotropic
16. Timestepping Framework --> Vertical Physics
17. Advection
18. Advection --> Momentum
19. Advection --> Lateral Tracers
20. Advection --> Vertical Tracers
21. Lateral Physics
22. Lateral Physics --> Momentum --> Operator
23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
24. Lateral Physics --> Tracers
25. Lateral Physics --> Tracers --> Operator
26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
27. Lateral Physics --> Tracers --> Eddy Induced Velocity
28. Vertical Physics
29. Vertical Physics --> Boundary Layer Mixing --> Details
30. Vertical Physics --> Boundary Layer Mixing --> Tracers
31. Vertical Physics --> Boundary Layer Mixing --> Momentum
32. Vertical Physics --> Interior Mixing --> Details
33. Vertical Physics --> Interior Mixing --> Tracers
34. Vertical Physics --> Interior Mixing --> Momentum
35. Uplow Boundaries --> Free Surface
36. Uplow Boundaries --> Bottom Boundary Layer
37. Boundary Forcing
38. Boundary Forcing --> Momentum --> Bottom Friction
39. Boundary Forcing --> Momentum --> Lateral Friction
40. Boundary Forcing --> Tracers --> Sunlight Penetration
41. Boundary Forcing --> Tracers --> Fresh Water Forcing
1. Key Properties
Ocean key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of ocean model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean model code (NEMO 3.6, MOM 5.0,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
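# e.g. (hypothetical; the value must be one of the valid choices listed above):
# DOC.set_value("OGCM")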
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of ocean model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the ocean.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
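# e.g. (hypothetical; cardinality 1.N, so set_value may be called once per variable):
# DOC.set_value("Potential temperature")
# DOC.set_value("Salinity")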
"""
Explanation: 1.5. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the ocean component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Seawater Properties
Physical properties of seawater in ocean
2.1. Eos Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
"""
Explanation: 2.2. Eos Functional Temp
Is Required: TRUE Type: ENUM Cardinality: 1.1
Temperature used in EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
"""
Explanation: 2.3. Eos Functional Salt
Is Required: TRUE Type: ENUM Cardinality: 1.1
Salinity used in EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
"""
Explanation: 2.4. Eos Functional Depth
Is Required: TRUE Type: ENUM Cardinality: 1.1
Depth or pressure used in EOS for sea water ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2.5. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.6. Ocean Specific Heat
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Specific heat in ocean (cpocean) in J/(kg K)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
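# e.g. (hypothetical value): DOC.set_value(1026.0)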
"""
Explanation: 2.7. Ocean Reference Density
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Boussinesq reference density (rhozero) in kg / m3
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Bathymetry
Properties of bathymetry in ocean
3.1. Reference Dates
Is Required: TRUE Type: ENUM Cardinality: 1.1
Reference date of bathymetry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
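# e.g. (hypothetical): DOC.set_value(True)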
"""
Explanation: 3.2. Type
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the bathymetry fixed in time in the ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Ocean Smoothing
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe any smoothing or hand editing of bathymetry in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.4. Source
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe source of bathymetry in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Nonoceanic Waters
Non-oceanic waters treatment in ocean
4.1. Isolated Seas
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how treatment of isolated seas is performed
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. River Mouth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how river mouth mixing or estuaries specific treatment is performed
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Software Properties
Software properties of ocean code
5.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Resolution
Resolution in the ocean grid
6.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.4. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.5. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.6. Is Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.7. Thickness Level 1
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Thickness of first surface ocean level (in meters)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Tuning Applied
Tuning methodology for ocean component
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation
Conservation in the ocean component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Brief description of conservation methodology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in the ocean by the numerical schemes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Consistency Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Any additional consistency properties (energy conversion, pressure gradient discretisation, ...)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Corrected Conserved Prognostic Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Set of variables which are conserved by more than the numerical scheme alone.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.5. Was Flux Correction Used
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does conservation involve flux correction ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Grid
Ocean grid
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of grid in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Grid --> Discretisation --> Vertical
Properties of vertical discretisation in ocean
10.1. Coordinates
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of vertical coordinates in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10.2. Partial Steps
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Using partial steps with Z or Z* vertical coordinate in ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Grid --> Discretisation --> Horizontal
Type of horizontal discretisation scheme in ocean
11.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Staggering
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal grid staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Timestepping Framework
Ocean Timestepping Framework
12.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of time stepping in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Diurnal Cycle
Is Required: TRUE Type: ENUM Cardinality: 1.1
Diurnal cycle type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Timestepping Framework --> Tracers
Properties of tracers time stepping in ocean
13.1. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracers time stepping scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Tracers time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Timestepping Framework --> Baroclinic Dynamics
Baroclinic dynamics in ocean
14.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Baroclinic dynamics type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Baroclinic dynamics scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.3. Time Step
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Baroclinic time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15. Timestepping Framework --> Barotropic
Barotropic time stepping in ocean
15.1. Splitting
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time splitting method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.2. Time Step
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Barotropic time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Timestepping Framework --> Vertical Physics
Vertical physics time stepping in ocean
16.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Details of vertical time stepping in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Advection
Ocean advection
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of advection in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
"""
Explanation: 18. Advection --> Momentum
Properties of lateral momentum advection scheme in ocean
18.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of lateral momentum advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Scheme Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean momentum advection scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 18.3. ALE
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Using ALE for vertical advection ? (if vertical coordinates are sigma)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19. Advection --> Lateral Tracers
Properties of lateral tracer advection scheme in ocean
19.1. Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Order of lateral tracer advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 19.2. Flux Limiter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Monotonic flux limiter for lateral tracer advection scheme in ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Effective Order
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Effective order of limited lateral tracer advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.4. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Descriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.5. Passive Tracers
Is Required: FALSE Type: ENUM Cardinality: 0.N
Passive tracers advected
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.6. Passive Tracers Advection
Is Required: FALSE Type: STRING Cardinality: 0.1
Is advection of passive tracers different than active ? If so, describe.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20. Advection --> Vertical Tracers
Properties of vertical tracer advection scheme in ocean
20.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Descriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 20.2. Flux Limiter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Monotonic flux limiter for vertical tracer advection scheme in ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Lateral Physics
Ocean lateral physics
21.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lateral physics in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
"""
Explanation: 21.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of transient eddy representation in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22. Lateral Physics --> Momentum --> Operator
Properties of lateral physics operator for momentum in ocean
22.1. Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Direction of lateral physics momentum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.2. Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Order of lateral physics momentum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Discretisation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Discretisation of lateral physics momentum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Properties of eddy viscosity coeff in lateral physics momentum scheme in the ocean
23.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Lateral physics momentum eddy viscosity coeff type in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 23.2. Constant Coefficient
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant, value of eddy viscosity coeff in lateral physics momentum scheme (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.3. Variable Coefficient
Is Required: FALSE Type: STRING Cardinality: 0.1
If space-varying, describe variations of eddy viscosity coeff in lateral physics momentum scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.4. Coeff Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe background eddy viscosity coeff in lateral physics momentum scheme (give values in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.5. Coeff Backscatter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there backscatter in eddy viscosity coeff in lateral physics momentum scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 24. Lateral Physics --> Tracers
Properties of lateral physics for tracers in ocean
24.1. Mesoscale Closure
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there a mesoscale closure in the lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 24.2. Submesoscale Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Lateral Physics --> Tracers --> Operator
Properties of lateral physics operator for tracers in ocean
25.1. Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Direction of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Order of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Discretisation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Discretisation of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Properties of eddy diffusity coeff in lateral physics tracers scheme in the ocean
26.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Lateral physics tracers eddy diffusity coeff type in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26.2. Constant Coefficient
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Variable Coefficient
Is Required: FALSE Type: STRING Cardinality: 0.1
If space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26.4. Coeff Background
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Describe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 26.5. Coeff Backscatter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there backscatter in eddy diffusity coeff in lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Properties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean
27.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of EIV in lateral physics tracers in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27.2. Constant Val
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If EIV scheme for tracers is constant, specify coefficient value (M2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Flux Type
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of EIV flux (advective or skew)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Added Diffusivity
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of EIV added diffusivity (constant, flow dependent or none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28. Vertical Physics
Ocean Vertical Physics
28.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vertical physics in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Properties of vertical physics in ocean
29.1. Langmuir Cells Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there Langmuir cells mixing in upper ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Properties of boundary layer (BL) mixing on tracers in the ocean
30.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of boundary layer mixing for tracers in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.2. Closure Order
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If turbulent BL mixing of tracers, specify order of closure (0, 1, 2.5, 3)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant BL mixing of tracers, specify coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background BL mixing of tracers coefficient (schema and value in m2/s - may be none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Properties of boundary layer (BL) mixing on momentum in the ocean
31.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of boundary layer mixing for momentum in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 31.2. Closure Order
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If turbulent BL mixing of momentum, specify order of closure (0, 1, 2.5, 3)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 31.3. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant BL mixing of momentum, specify coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background BL mixing of momentum coefficient (schema and value in m2/s - may be none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32. Vertical Physics --> Interior Mixing --> Details
Properties of interior mixing in the ocean
32.1. Convection Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of vertical convection in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.2. Tide Induced Mixing
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how tide induced mixing is modelled (barotropic, baroclinic, none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.3. Double Diffusion
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there double diffusion
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.4. Shear Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there interior shear mixing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33. Vertical Physics --> Interior Mixing --> Tracers
Properties of interior mixing on tracers in the ocean
33.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of interior mixing for tracers in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 33.2. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant interior mixing of tracers, specify coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33.3. Profile
Is Required: TRUE Type: STRING Cardinality: 1.1
Is the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background interior mixing of tracers coefficient (schema and value in m2/s - may be none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34. Vertical Physics --> Interior Mixing --> Momentum
Properties of interior mixing on momentum in the ocean
34.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of interior mixing for momentum in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 34.2. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant interior mixing of momentum, specify coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34.3. Profile
Is Required: TRUE Type: STRING Cardinality: 1.1
Is the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background interior mixing of momentum coefficient (schema and value in m2/s - may be none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Uplow Boundaries --> Free Surface
Properties of free surface in ocean
35.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of free surface in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Free surface scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 35.3. Embeded Seaice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the sea-ice embedded in the ocean model (instead of levitating) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Uplow Boundaries --> Bottom Boundary Layer
Properties of bottom boundary layer in ocean
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of bottom boundary layer in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.2. Type Of Bbl
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of bottom boundary layer in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 36.3. Lateral Mixing Coef
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.4. Sill Overflow
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe any specific treatment of sill overflows
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37. Boundary Forcing
Ocean boundary forcing
37.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of boundary forcing in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Surface Pressure
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.3. Momentum Flux Correction
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.4. Tracers Flux Correction
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.5. Wave Effects
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how wave effects are modelled at ocean surface.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.6. River Runoff Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how river runoff from land surface is routed to ocean and any global adjustment done.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.7. Geothermal Heating
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how geothermal heating is present at ocean bottom.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 38. Boundary Forcing --> Momentum --> Bottom Friction
Properties of momentum bottom friction in ocean
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of momentum bottom friction in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 39. Boundary Forcing --> Momentum --> Lateral Friction
Properties of momentum lateral friction in ocean
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of momentum lateral friction in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Properties of sunlight penetration scheme in ocean
40.1. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of sunlight penetration scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 40.2. Ocean Colour
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the ocean sunlight penetration scheme ocean colour dependent ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40.3. Extinction Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe and list extinctions depths for sunlight penetration scheme (if applicable).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Properties of surface fresh water forcing in ocean
41.1. From Atmosphere
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of surface fresh water forcing from atmos in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. From Sea Ice
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of surface fresh water forcing from sea-ice in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 41.3. Forced Mode Restoring
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of surface salinity restoring in forced mode (OMIP)
End of explanation
"""
|
Jackie789/JupyterNotebooks | MultivariableRegression_ChallengeWithCrossValidation.ipynb | gpl-3.0 | import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import linear_model
# Suppress annoying harmless error.
warnings.filterwarnings(
action="ignore"
)
data_path = "https://raw.githubusercontent.com/Thinkful-Ed/data-201-resources/master/New_York_offenses/NEW_YORK-Offenses_Known_to_Law_Enforcement_by_City_2013%20-%2013tbl8ny.csv"
data = pd.read_csv(data_path, delimiter = ',', skiprows=4, header=0, skipfooter=3, thousands=',')
data = pd.DataFrame(data)
data.head()
# Instantiate and fit our model.
regr = linear_model.LinearRegression()
Y = data['Property\ncrime'].values.reshape(-1, 1)
X = data[["Larceny-\ntheft", "Motor\nvehicle\ntheft", "Burglary"]]
regr.fit(X, Y)
# Inspect the results.
print('\nCoefficients: \n', regr.coef_)
print('\nIntercept: \n', regr.intercept_)
print('\nR-squared:')
print(regr.score(X, Y))
"""
Explanation: Multivariable Regression Model of FBI Property Crime Statistics
Using the FBI:UCR Crime dataset, which can be found here, build a regression model to predict property crimes.
The FBI defines property crime as including the offenses of burglary, larceny-theft, motor vehicle theft, and arson. To predict property crime, one can simply use these features.
End of explanation
"""
plt.figure(figsize=(15,5))
sns.pairplot(data, vars =['Property\ncrime', 'Population', 'Violent\ncrime',
'Murder and\nnonnegligent\nmanslaughter',
'Rape\n(legacy\ndefinition)2',
'Robbery', 'Aggravated\nassault'])
plt.show()
"""
Explanation: Perfect accuracy, as expected, since the target is essentially the sum of the features we fed in. However...
Predicting ALL property crimes is a more interesting question.
Building a Model to Predict Property Crimes (without using the Property Crime features)
To start, let's take a look at how each of the non-property crime features interacts with property crime.
End of explanation
"""
dataCleaned = data[data["Property\ncrime"] < 20000]
plt.figure(figsize=(15,5))
sns.pairplot(dataCleaned, vars =['Property\ncrime', 'Population', 'Violent\ncrime',
'Murder and\nnonnegligent\nmanslaughter',
'Rape\n(legacy\ndefinition)2',
'Robbery', 'Aggravated\nassault'])
plt.show()
plt.scatter(dataCleaned["Property\ncrime"], dataCleaned["Murder and\nnonnegligent\nmanslaughter"])
plt.title('Raw values')
plt.xlabel("Property Crime")
plt.ylabel("Murder")
plt.show()
"""
Explanation: That single outlier is making the relationships difficult to view. Let's remove the outlier.
End of explanation
"""
dataCleaned["Murder"] = dataCleaned['Murder and\nnonnegligent\nmanslaughter'].apply(lambda x: 0 if x == 0 else 1)
plt.scatter(dataCleaned["Property\ncrime"], dataCleaned["Murder"])
plt.title('Raw values')
plt.xlabel("Property Crime")
plt.ylabel("Murder")
plt.show()
dataCleaned.head()
regr = linear_model.LinearRegression()
Y = dataCleaned['Property\ncrime'].values.reshape(-1, 1)
X = dataCleaned[['Population', 'Violent\ncrime',
'Murder and\nnonnegligent\nmanslaughter',
'Rape\n(legacy\ndefinition)2',
'Robbery', 'Aggravated\nassault']]
regr.fit(X, Y)
# Inspect the results.
print('\nCoefficients: \n', regr.coef_)
print('\nIntercept: \n', regr.intercept_)
print('\nR-squared:')
print(regr.score(X, Y))
"""
Explanation: There are a large number of 0's for Murder. Let's instead use a binary indicator for whether or not any murder occurred.
End of explanation
"""
regr = linear_model.LinearRegression()
Y = dataCleaned['Property\ncrime'].values.reshape(-1, 1)
X = dataCleaned[['Population', 'Violent\ncrime',
'Murder', 'Rape\n(legacy\ndefinition)2',
'Robbery', 'Aggravated\nassault']]
regr.fit(X, Y)
# Inspect the results.
print('\nCoefficients: \n', regr.coef_)
print('\nIntercept: \n', regr.intercept_)
print('\nR-squared:')
print(regr.score(X, Y))
"""
Explanation: Noice!!
What about performance when the binary Murder feature is used?
End of explanation
"""
data["Murder"] = data['Murder and\nnonnegligent\nmanslaughter'].apply(lambda x: 0 if x == 0 else 1)
regr = linear_model.LinearRegression()
Y = data['Property\ncrime'].values.reshape(-1, 1)
X = data[['Population', 'Violent\ncrime',
'Murder', 'Rape\n(legacy\ndefinition)2',
'Robbery', 'Aggravated\nassault']]
regr.fit(X, Y)
# Inspect the results.
print('\nCoefficients: \n', regr.coef_)
print('\nIntercept: \n', regr.intercept_)
print('\nR-squared:')
print(regr.score(X, Y))
"""
Explanation: There is a slight increase in performance when the binary indicator for murder is used.
Leave no man behind!
Reintroduce the outlier to the model.
End of explanation
"""
regr = linear_model.LinearRegression()
Y = dataCleaned['Property\ncrime'].values.reshape(-1, 1)
X = dataCleaned[['Population', 'Violent\ncrime',
'Murder', 'Rape\n(legacy\ndefinition)2',
'Robbery', 'Aggravated\nassault']]
regr.fit(X, Y)
# Inspect the results.
print('\nCoefficients: \n', regr.coef_)
print('\nIntercept: \n', regr.intercept_)
print('\nR-squared:')
print(regr.score(X, Y))
"""
Explanation: Hmmm... it seems the outlier has also heavily skewed the R-squared value and the coefficients. The linear model that did not incorporate the outlier is likely to be a better indicator of overall trends and accuracy.
Best Model:
End of explanation
"""
from sklearn.model_selection import cross_val_score  # sklearn.cross_validation has been removed; model_selection provides cross_val_score
regr = linear_model.LinearRegression()
y = data['Property\ncrime'].values.reshape(-1, 1)
X = data[['Population', 'Violent\ncrime',
'Murder', 'Rape\n(legacy\ndefinition)2',
'Robbery', 'Aggravated\nassault']]
scores = cross_val_score(regr, X, y, cv = 10)
print("Percent accuracy within each fold:\n")
print(scores)
print("\nMean accuracy:\n")
print(scores.mean())
"""
Explanation: Validating regression models for prediction
Now let's use cross-validation to obtain a more reliable estimate of the model's predictive accuracy:
End of explanation
"""
data_path = "files/table_8_offenses_known_to_law_enforcement_california_by_city_2013.csv"
dataCA = pd.read_csv(data_path, delimiter = ',', skiprows=4, header=0, skipfooter=3, thousands=',')
dataCA = pd.DataFrame(dataCA)
dataCA.head()
dataCA["Murder"] = dataCA['Murder and\nnonnegligent\nmanslaughter'].apply(lambda x: 0 if x == 0 else 1)
y = dataCA['Property\ncrime'].values.reshape(-1, 1)
X = dataCA[['Population', 'Violent\ncrime',
'Murder', 'Rape\n(legacy\ndefinition)2',
'Robbery', 'Aggravated\nassault']]
scores = cross_val_score(regr, X, y, cv = 10)
print("Percent accuracy within each fold:\n")
print(scores)
print("\nMean accuracy:\n")
print(scores.mean())
"""
Explanation: Test the Model with Data From Another State
Now let's test our model with the 2013 Crime Rate dataset for California. Will the predictive power be similar?
End of explanation
"""
|
tensorflow/docs-l10n | site/ko/tutorials/keras/regression.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
Explanation: Copyright 2018 The TensorFlow Authors.
End of explanation
"""
# Install the seaborn package, used to draw the scatterplot matrix (pairplot)
!pip install seaborn
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
"""
Explanation: Predict fuel efficiency: regression
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/keras/regression"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/tutorials/keras/regression.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/tutorials/keras/regression.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub) 소스 보기</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/tutorials/keras/regression.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Note: This document was translated by the TensorFlow community. Because community translations are best-effort,
there is no guarantee that this is an exact and up-to-date reflection of the official English documentation.
If you have suggestions to improve this translation,
please send a pull request to the tensorflow/docs-l10n GitHub repository.
To volunteer to write or review translations,
please email
[email protected].
In a regression problem, the aim is to predict the output of a continuous value, like a price or a probability. In contrast, a classification problem aims to select a class from a list of classes (for example, deciding whether a picture contains an apple or an orange).
This notebook uses the classic Auto MPG dataset and builds a model to predict the fuel efficiency of late-1970s and early-1980s automobiles. To do this, we provide the model with a description of many automobiles from that time period. This description includes attributes like cylinders, displacement, horsepower, and weight.
This example uses the tf.keras API; see the Keras guide for details.
End of explanation
"""
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
"""
Explanation: The Auto MPG dataset
The dataset is available from the UCI Machine Learning Repository.
Get the data
First download the dataset.
End of explanation
"""
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
"""
Explanation: Import it using pandas.
End of explanation
"""
dataset.isna().sum()
"""
Explanation: Clean the data
The dataset contains a few missing values.
End of explanation
"""
dataset = dataset.dropna()
"""
Explanation: To keep this initial tutorial simple, drop the rows with missing values.
End of explanation
"""
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
"""
Explanation: "Origin" 열은 수치형이 아니고 범주형이므로 원-핫 인코딩(one-hot encoding)으로 변환하겠습니다:
End of explanation
"""
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
"""
Explanation: Split the data into train and test sets
Now split the dataset into a training set and a test set.
We will use the test set in the final evaluation of our model.
End of explanation
"""
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
"""
Explanation: Inspect the data
Have a quick look at the joint distribution of a few pairs of columns from the training set using a scatterplot matrix.
End of explanation
"""
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
"""
Explanation: Also look at the overall statistics:
End of explanation
"""
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
"""
Explanation: Split features from labels
Separate the target value, or "label", from the features. This label is the value that we will train the model to predict.
End of explanation
"""
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
"""
Explanation: Normalize the data
Look again at the train_stats block above and note how different the ranges of each feature are.
It is good practice to normalize features that use different scales and ranges. Although the model might converge without feature normalization, it makes training harder, and it makes the resulting model dependent on the units of the inputs.
Note: we intentionally generate these statistics from the training set only; they are also used to normalize the test set. This projects the test set into the same distribution that the model has been trained on.
End of explanation
"""
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
"""
Explanation: This normalized data is what we will use to train the model.
Caution: the statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, together with the one-hot encoding. That includes the test set as well as live data when the model is used in production.
The model
Build the model
Let's build our model. Here, we'll use a Sequential model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model-building steps are wrapped in a function, build_model, since we'll create a second model later on.
End of explanation
"""
model.summary()
"""
Explanation: Inspect the model
Use the .summary method to print a simple description of the model.
End of explanation
"""
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
"""
Explanation: Now try out the model. Take a batch of 10 examples from the training data and call model.predict on it.
End of explanation
"""
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
"""
Explanation: It seems to be working, and it produces a result of the expected shape and type.
Train the model
Train the model for 1,000 epochs, and record the training and validation accuracy in the history object.
End of explanation
"""
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
import matplotlib.pyplot as plt
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure(figsize=(8,12))
plt.subplot(2,1,1)
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mae'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mae'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.subplot(2,1,2)
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mse'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mse'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
"""
Explanation: Visualize the model's training progress using the stats stored in the history object.
End of explanation
"""
model = build_model()
# The patience parameter is the number of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
"""
Explanation: This graph shows little improvement in the model after a few hundred epochs. Let's update the model.fit call so that training stops automatically when the validation score doesn't improve. We'll use an EarlyStopping callback that checks a training condition at every epoch. If a set number of epochs elapses without showing improvement, training stops automatically.
You can learn more about this callback here.
End of explanation
"""
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2)
print("테스트 세트의 평균 절대 오차: {:5.2f} MPG".format(mae))
"""
Explanation: The graph shows that on the validation set the average error is about +/- 2 MPG. Is this good? We'll leave that decision up to you.
Let's see how well the model generalizes by using the test set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world:
End of explanation
"""
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
"""
Explanation: Make predictions
Finally, predict MPG values using data in the testing set:
End of explanation
"""
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
"""
Explanation: It looks like our model predicts reasonably well. Let's take a look at the error distribution.
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive/09_sequence/sinewaves.ipynb | apache-2.0 | !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
!pip install --user google-cloud-bigquery==1.25.0
"""
Explanation: Time Series Prediction
Objectives
1. Build a linear, DNN and CNN model in Keras.
2. Build a simple RNN model and a multi-layer RNN model in Keras.
In this lab we will start with linear, DNN and CNN models.
Since the features of our model are sequential in nature, we'll next look at how to build various RNN models in Keras. We'll start with a simple RNN model and then see how to create a multi-layer RNN in Keras.
We will be exploring a lot of different model types in this notebook.
End of explanation
"""
PROJECT = "your-gcp-project-here" # REPLACE WITH YOUR PROJECT NAME
BUCKET = "your-gcp-bucket-here" # REPLACE WITH YOUR BUCKET
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
%env
PROJECT = PROJECT
BUCKET = BUCKET
REGION = REGION
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from google.cloud import bigquery
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense, DenseFeatures,
Conv1D, MaxPool1D,
Reshape, RNN,
LSTM, GRU, Bidirectional)
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
# To plot pretty figures
%matplotlib inline
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# For reproducible results.
from numpy.random import seed
seed(1)
tf.random.set_seed(2)
"""
Explanation: Note: Restart your kernel to use updated packages.
Kindly ignore the deprecation warnings and incompatibility errors related to google-cloud-storage.
Load necessary libraries and set up environment variables
End of explanation
"""
%%time
bq = bigquery.Client(project=PROJECT)
bq_query = '''
#standardSQL
SELECT
symbol,
Date,
direction,
close_values_prior_260
FROM
`stock_market.eps_percent_change_sp500`
LIMIT
100
'''
"""
Explanation: Explore time series data
We'll start by pulling a small sample of the time series data from BigQuery and write some helper functions to clean up the data for modeling. We'll use the data from the eps_percent_change_sp500 table in BigQuery. The close_values_prior_260 column contains the close values for any given stock for the previous 260 days.
End of explanation
"""
def clean_data(input_df):
"""Cleans data to prepare for training.
Args:
input_df: Pandas dataframe.
Returns:
Pandas dataframe.
"""
df = input_df.copy()
# Remove inf/na values.
real_valued_rows = ~(df == np.inf).max(axis=1)
df = df[real_valued_rows].dropna()
# TF doesn't accept datetimes in DataFrame.
df['Date'] = pd.to_datetime(df['Date'], errors='coerce')
df['Date'] = df['Date'].dt.strftime('%Y-%m-%d')
# TF requires numeric label.
df['direction_numeric'] = df['direction'].apply(lambda x: {'DOWN': 0,
'STAY': 1,
'UP': 2}[x])
return df
"""
Explanation: The function clean_data below does three things:
1. First, we'll remove any inf or NA values
2. Next, we parse the Date field to read it as a string.
3. Lastly, we convert the label direction into a numeric quantity, mapping 'DOWN' to 0, 'STAY' to 1 and 'UP' to 2.
End of explanation
"""
STOCK_HISTORY_COLUMN = 'close_values_prior_260'
COL_NAMES = ['day_' + str(day) for day in range(0, 260)]
LABEL = 'direction_numeric'
def _scale_features(df):
"""z-scale feature columns of Pandas dataframe.
Args:
df: Pandas dataframe.
Returns:
Pandas dataframe with each column standardized according to the
values in that column.
"""
avg = df.mean()
std = df.std()
return (df - avg) / std
def create_features(df, label_name):
"""Create modeling features and label from Pandas dataframe.
Args:
df: Pandas dataframe.
label_name: str, the column name of the label.
Returns:
Pandas dataframe
"""
# Expand 1 column containing a list of close prices to 260 columns.
time_series_features = df[STOCK_HISTORY_COLUMN].apply(pd.Series)
# Rename columns.
time_series_features.columns = COL_NAMES
time_series_features = _scale_features(time_series_features)
# Concat time series features with static features and label.
label_column = df[LABEL]
return pd.concat([time_series_features,
label_column], axis=1)
"""
Explanation: Read data and preprocessing
Before we begin modeling, we'll preprocess our features by scaling to the z-score. This will ensure that the range of the feature values being fed to the model are comparable and should help with convergence during gradient descent.
End of explanation
"""
def _create_split(phase):
"""Create string to produce train/valid/test splits for a SQL query.
Args:
phase: str, either TRAIN, VALID, or TEST.
Returns:
String.
"""
floor, ceiling = '2002-11-01', '2010-07-01'
if phase == 'VALID':
floor, ceiling = '2010-07-01', '2011-09-01'
elif phase == 'TEST':
floor, ceiling = '2011-09-01', '2012-11-30'
return '''
WHERE Date >= '{0}'
AND Date < '{1}'
'''.format(floor, ceiling)
def create_query(phase):
"""Create SQL query to create train/valid/test splits on subsample.
Args:
phase: str, either TRAIN, VALID, or TEST.
Returns:
String.
"""
basequery = """
#standardSQL
SELECT
symbol,
Date,
direction,
close_values_prior_260
FROM
`stock_market.eps_percent_change_sp500`
"""
return basequery + _create_split(phase)
"""
Explanation: Make train-eval-test split
Next, we'll make repeatable splits for our train/validation/test datasets and save these datasets to local csv files. The query below will take a subsample of the entire dataset and then create a 70-15-15 split for the train/validation/test sets.
End of explanation
"""
N_TIME_STEPS = 260
N_LABELS = 3
Xtrain = pd.read_csv('stock-train.csv')
Xvalid = pd.read_csv('stock-valid.csv')
ytrain = Xtrain.pop(LABEL)
yvalid = Xvalid.pop(LABEL)
ytrain_categorical = to_categorical(ytrain.values)
yvalid_categorical = to_categorical(yvalid.values)
"""
Explanation: Modeling
For experimentation purposes, we'll train various models on data that fits in memory, read from the .csv files we created above.
End of explanation
"""
def plot_curves(train_data, val_data, label='Accuracy'):
"""Plot training and validation metrics on single axis.
Args:
train_data: list, metrics obtrained from training data.
val_data: list, metrics obtained from validation data.
label: str, title and label for plot.
Returns:
Matplotlib plot.
"""
plt.plot(np.arange(len(train_data)) + 0.5,
train_data,
"b.-", label="Training " + label)
plt.plot(np.arange(len(val_data)) + 1,
val_data, "r.-",
label="Validation " + label)
plt.gca().xaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True))
plt.legend(fontsize=14)
plt.xlabel("Epochs")
plt.ylabel(label)
plt.grid(True)
"""
Explanation: To monitor training progress and compare evaluation metrics for different models, we'll use the function below to plot metrics captured from the training job such as training and validation loss or accuracy.
End of explanation
"""
sum(yvalid == ytrain.value_counts().idxmax()) / yvalid.shape[0]
"""
Explanation: Baseline
Before we begin modeling in Keras, let's create a benchmark using a simple heuristic. Let's see what kind of accuracy we would get on the validation set if we predict the majority class of the training set.
End of explanation
"""
model = Sequential()
model.add(Dense(units=N_LABELS,
activation='softmax',
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.compile(optimizer=Adam(learning_rate=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=30,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
"""
Explanation: Ok. So just naively guessing the most common outcome UP will give about 29.5% accuracy on the validation set.
Linear model
We'll start with a simple linear model, mapping our sequential input to a single fully dense layer.
End of explanation
"""
np.mean(history.history['val_accuracy'][-5:])
"""
Explanation: The accuracy seems to level out pretty quickly. To report the accuracy, we'll average the accuracy on the validation set across the last few epochs of training.
End of explanation
"""
dnn_hidden_units = [16, 8]
model = Sequential()
for layer in dnn_hidden_units:
model.add(Dense(units=layer,
activation="relu"))
model.add(Dense(units=N_LABELS,
activation="softmax",
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.compile(optimizer=Adam(learning_rate=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=10,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
"""
Explanation: Deep Neural Network
The linear model is an improvement on our naive benchmark. Perhaps we can do better with a more complicated model. Next, we'll create a deep neural network with Keras. We'll experiment with a two-layer DNN here, but feel free to try a more complex model or add other techniques to try to improve your performance.
End of explanation
"""
model = Sequential()
# Convolutional layer
model.add(Reshape(target_shape=[N_TIME_STEPS, 1]))
model.add(Conv1D(filters=5,
kernel_size=5,
strides=2,
padding="valid",
input_shape=[None, 1]))
model.add(MaxPool1D(pool_size=2,
strides=None,
padding='valid'))
# Flatten the result and pass through DNN.
model.add(tf.keras.layers.Flatten())
model.add(Dense(units=N_TIME_STEPS//4,
activation="relu"))
model.add(Dense(units=N_LABELS,
activation="softmax",
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.compile(optimizer=Adam(learning_rate=0.01),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=10,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
"""
Explanation: Convolutional Neural Network
The DNN does slightly better. Let's see how a convolutional neural network performs.
A 1-dimensional convolutional can be useful for extracting features from sequential data or deriving features from shorter, fixed-length segments of the data set. Check out the documentation for how to implement a Conv1d in Tensorflow. Max pooling is a downsampling strategy commonly used in conjunction with convolutional neural networks. Next, we'll build a CNN model in Keras using the Conv1D to create convolution layers and MaxPool1D to perform max pooling before passing to a fully connected dense layer.
End of explanation
"""
model = Sequential()
# Reshape inputs to pass through RNN layer.
model.add(Reshape(target_shape=[N_TIME_STEPS, 1]))
model.add(LSTM(N_TIME_STEPS // 8,
activation='relu',
return_sequences=False))
model.add(Dense(units=N_LABELS,
activation='softmax',
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
# Create the model.
model.compile(optimizer=Adam(learning_rate=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=40,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
"""
Explanation: Recurrent Neural Network
RNNs are particularly well-suited for learning sequential data. They retain state information from one iteration to the next by feeding the output from one cell as input for the next step. In the cell below, we'll build a RNN model in Keras. The final state of the RNN is captured and then passed through a fully connected layer to produce a prediction.
End of explanation
"""
rnn_hidden_units = [N_TIME_STEPS // 16,
N_TIME_STEPS // 32]
model = Sequential()
# Reshape inputs to pass through RNN layer.
model.add(Reshape(target_shape=[N_TIME_STEPS, 1]))
for layer in rnn_hidden_units[:-1]:
model.add(GRU(units=layer,
activation='relu',
return_sequences=True))
model.add(GRU(units=rnn_hidden_units[-1],
return_sequences=False))
model.add(Dense(units=N_LABELS,
activation="softmax",
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.compile(optimizer=Adam(learning_rate=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=50,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
"""
Explanation: Multi-layer RNN
Next, we'll build a multi-layer RNN. Just as multiple layers of a deep neural network allow more complicated features to be learned during training, additional RNN layers can potentially learn complex features in sequential data. For a multi-layer RNN, the output of the first RNN layer is fed as the input into the next RNN layer.
End of explanation
"""
|
allandieguez/teaching | Matplotlib e Seaborn/Modulo 3 - Histogramas + Box Plot.ipynb | gpl-3.0 | import numpy as np
import os
import pandas as pd
""" habilitando plots no notebook """
%matplotlib inline
""" plot libs """
import matplotlib.pyplot as plt
import seaborn as sns
""" Configurando o Matplotlib para o modo manual """
plt.interactive(False)
"""
Explanation: Module 3: Histograms + Box Plot
Tutorial
Imports
End of explanation
"""
""" DataFrame contendo 5 Séries com Distribuições Normais distintas """
df = pd.DataFrame(
columns=["S1", "S2", "S3", "S4", "S5"],
data=(
np.random.randn(100, 5) * np.array([10, 15, 50, 100, 200]) + np.array([0, 5, 30, 30, 50])
)
)
"""
Explanation: Dataset
End of explanation
"""
""" Histograma sem Normalização """
plt.figure(figsize=(12,8))
plt.hist(df.S1, bins=10)
plt.show()
""" Histograma com Normalização """
plt.figure(figsize=(12,8))
plt.hist(df.S1, bins=10, normed=True)
plt.show()
df.S1.describe()
""" Histograma de duas Séries """
plt.figure(figsize=(12,8))
plt.hist(df[["S1", "S2"]], bins=10, normed=True)
plt.show()
df[["S1", "S2"]].describe()
""" Histograma de mais de duas Séries """
plt.figure(figsize=(12,8))
plt.hist(df, bins=10, normed=True)
plt.show()
df.describe()
"""
Explanation: Histogram Plot
Pure Matplotlib
End of explanation
"""
plt.figure(figsize=(15,10))
plt.hist(df.S1, bins=10, normed=True, color="blue", alpha=0.5, label="S1")
plt.hist(df.S2, bins=10, normed=True, color="red", alpha=0.5, label="S2")
plt.legend()
plt.show()
"""
Explanation: Note:
Visualizing the histogram of several series at once is very confusing with matplotlib alone. Sometimes it is preferable to use a single-window visualization, as shown below:
End of explanation
"""
""" Uma Série """
df.S1.hist(bins=10, normed=True, figsize=(12,8))
plt.show()
""" Histograma de duas Séries """
df[["S1", "S2"]].hist(bins=10, normed=True, figsize=(12,8))
plt.show()
df[["S1", "S2"]].describe()
""" Histograma de mais de duas Séries """
df.hist(bins=10, figsize=(12,8))
plt.show()
df.describe()
"""
Explanation: Using Pandas
End of explanation
"""
""" Uma Série """
plt.figure( figsize=(12,8))
sns.distplot(df.S1)
plt.show()
"""
Explanation: Using Seaborn
End of explanation
"""
""" Histograma de duas Séries (1) """
plt.figure( figsize=(12,8))
f, axes = plt.subplots(2, 1, figsize=(15, 8), sharex=True)
sns.distplot(df.S1, kde=False, color="blue", ax=axes[0])
sns.distplot(df.S2, kde=True, color="red", ax=axes[1])
plt.show()
df[["S1", "S2"]].describe()
""" Histograma de duas Séries (1) """
f, axes = plt.subplots(1, 2, figsize=(15, 8), sharex=True)
sns.distplot(df.S1, kde=False, color="blue", ax=axes[0])
sns.distplot(df.S2, kde=True, color="red", ax=axes[1])
plt.show()
df[["S1", "S2"]].describe()
""" Histograma de mais de duas Séries """
plt.figure( figsize=(12,8))
f, axes = plt.subplots(3, 2, figsize=(15, 8), sharex=True)
sns.distplot(df.S1, kde=False, color="blue", ax=axes[0, 0])
sns.distplot(df.S2, kde=True, color="red", ax=axes[0, 1])
sns.distplot(df.S3, kde=True, color="orange", ax=axes[1, 0])
sns.distplot(df.S4, kde=True, rug=True, color="gray", ax=axes[1, 1])
sns.distplot(df.S5, hist=False, kde_kws={"shade": True}, color="purple", ax=axes[2, 1])
plt.show()
df[["S1", "S2"]].describe()
"""
Explanation: Note:
Seaborn does not automatically support histogram plots of multiple DataFrame columns
End of explanation
"""
""" default: vertical """
plt.figure(figsize=(15,10))
plt.boxplot(df.S1)
plt.show()
""" horizontal pra variar """
plt.figure(figsize=(15,10))
plt.boxplot(df.S1, vert=False)
plt.show()
"""
Explanation: Box Plot
Pure Matplotlib
A Single Series
End of explanation
"""
plt.figure(figsize=(15,10))
plt.boxplot(df.T)
plt.show()
"""
Explanation: More Series
End of explanation
"""
tmp1 = df[["S1", "S2"]]
tmp2 = df[["S3", "S3"]]
tmp2.columns = tmp1.columns # append com colunas iguais não cria NaNs
tmp = tmp1.append(tmp2)
plt.figure(figsize=(15,10))
plt.boxplot(tmp.T)
plt.show()
tmp.describe(percentiles=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
"""
Explanation: Outliers!!!
End of explanation
"""
df.plot(kind="box", figsize=(15,10))
plt.show()
"""
Explanation: Pandas for the Go!
All Together, with Labels!
End of explanation
"""
tmp1 = df.copy()
tmp2 = df[["S5", "S5", "S5", "S5", "S5"]]
tmp2.columns = tmp1.columns # append com colunas iguais não cria NaNs
tmp = tmp1.append(tmp2)
plt.figure(figsize=(15,10))
tmp.plot(kind="box", figsize=(15,10))
plt.show()
tmp.describe(percentiles=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
"""
Explanation: Outliers!!!
End of explanation
"""
plt.figure(figsize=(15,10))
sns.boxplot(data=df)
plt.show()
"""
Explanation: Seaborn
End of explanation
"""
serie_original = pd.Series(np.random.randn(900)) * 32 + 230
outliers = pd.Series(np.random.randn(100)) * 320 + 230
"""
Explanation: Challenge
Objective:
Visualize a Normal Distribution using Histograms and Box Plot
Dataset:
End of explanation
"""
""" Escreva a a Solução Aqui """
"""
Explanation: [ A ] Exploration
Part 1
Use Histogram and Box Plot to visualize:
* the Original Series
* the Outliers
* the Original Series and the Outliers in the same window
End of explanation
"""
""" Escreva a a Solução Aqui """
"""
Explanation: Part 2
Join the Original Series with the Outliers to create a Contaminated Series. Visualize the new series.
Then propose cuts to the series that eliminate the outliers without losing too much data from inside the distribution.
For each cut, visualize the distribution and its control measures.
End of explanation
"""
""" Escreva a a Solução Aqui """
"""
Explanation: [ B ]
Create a plot with two vertically stacked panels, where:
* the top panel is a horizontal Box Plot
* the bottom panel is a histogram
The two panels must share the horizontal axis, so that the attributes of the distribution stay aligned.
End of explanation
"""
|
keldLundgaard/Sandbox_democracy | Sandbox_response_rate_problem.ipynb | mit | N_members = 1254
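# Imports assumed by the cells below (editor's note: the notebook's import cell
# is not shown in this excerpt; these are the libraries the code relies on).
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator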
N_respondents = 100
p = 0.6
N_yes = int(N_members*p)
N_no = int(N_members*(1-p))
"""
Explanation: Intro: Reliability of survey and voting results for low response rates
I will here briefly explore how we can get around the uncertainty in community surveys and votes where we don't have full participation. How can we ensure that the survey/voting result is not a fluke caused by a low response rate?
Example
Let's look at the probability that a greater-than-50% majority in a sample misrepresents the true sentiment of the whole population. The idea is that when the sample size is small, the result can vary considerably due to random fluctuation.
To simplify the problem, I will only look at a yes/no question here, and simulate the range of results we could see given that the true sentiment is 60% yes and 100 people respond.
End of explanation
"""
runs = 10000 # sufficient to get good statistics
votes_samples = np.vstack(([
np.random.choice(
np.hstack((np.ones(N_yes), np.zeros(N_no))),
size=N_respondents, replace=False, p=None)
for i in range(runs)]))
results = np.mean(votes_samples, axis=1)
print('mean: %.3f Sigma: %.3f' % (np.mean(results), np.std(results)))
print('mean-2*sigma.: %.3f' % (np.mean(results)- 2*np.std(results)))
plt.hist(results, bins=59*2+1, range=[0.2, 0.8])
plt.show()
"""
Explanation: Now let's simulate different voting outcomes:
End of explanation
"""
runs = 50000
certain_result_two_percentile = []
certain_result_two_std = []
N_respondants_list = list(range(10, 20, 5)) + list(range(20, 100, 10)) + list(range(100, 700, 25))
for N_respondents in N_respondants_list:
p = 0.5
N_yes = int(N_members*p)
N_no = int(N_members*(1-p))
votes_samples = np.vstack(([
np.random.choice(
np.hstack((np.ones(N_yes), np.zeros(N_no))),
size=N_respondents, replace=False, p=None)
for i in range(runs)]))
results = np.mean(votes_samples, axis=1)
certain_result_two_percentile.append(np.sort(results)[-int(runs*0.02)])
certain_result_two_std.append(0.5 + 2*np.std(results))
fig, ax = plt.subplots(figsize=(12, 6))
x = N_respondants_list
plt.plot(x, certain_result_two_percentile, '-', label='2-percentile')
plt.plot(x, certain_result_two_std, '-', label='Two standard deviations')
ax.xaxis.set_major_locator(MultipleLocator(25))
ax.yaxis.set_major_locator(MultipleLocator(0.05))
plt.ylabel('Needed sentiment incl. margin')
plt.xlabel('Number of responses')
plt.legend()
plt.show()
"""
Explanation: With a margin of 2 standard deviations (sigma), we have roughly 98% certainty (looking at only one side of the bell curve) that the true sentiment is yes. So, with about 100 respondents, we can be fairly confident that the majority of Sandbox is in favor if more than 60% of the responses are yes.
Generalization: what majority sentiment is required at different response rates?
Let's see what majority sentiment is required for 98% certainty that the result of a survey/vote corresponds to the sentiment of all members.
End of explanation
"""
runs = int(1e6)
N_respondents = 468
p = 0.5
N_yes = int(N_members*p)
N_no = int(N_members*(1-p))
votes_samples = np.vstack(([
np.random.choice(
np.hstack((np.ones(N_yes), np.zeros(N_no))),
size=N_respondents,
replace=False,
p=None)
for i in range(runs)]))
results = np.mean(votes_samples, axis=1)
print('98percentile: %.2f%%' % (100 * np.sort(results)[-int(runs * 0.02)]))
print('2 sigma value: %.2f%%' % (100 * (0.5 + 2*np.std(results))))
"""
Explanation: We can see here that the required majority goes steadily down as the number of respondents goes up. At 25 responses a 70% majority is required, while with 300 respondents only a 55% majority is required.
Note that I include both the two-standard-deviation estimate and the 2-percentile of the runs in order to validate the two-standard-deviation approach.
Leadership Transition Process Proposal Vote
Analyzing the open transition proposal.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/ipsl/cmip6/models/sandbox-3/seaice.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ipsl', 'sandbox-3', 'seaice')
"""
Explanation: ES-DOC CMIP6 Model Properties - Seaice
MIP Era: CMIP6
Institute: IPSL
Source ID: SANDBOX-3
Topic: Seaice
Sub-Topics: Dynamics, Thermodynamics, Radiative Processes.
Properties: 80 (63 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-20 15:02:45
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Model
2. Key Properties --> Variables
3. Key Properties --> Seawater Properties
4. Key Properties --> Resolution
5. Key Properties --> Tuning Applied
6. Key Properties --> Key Parameter Values
7. Key Properties --> Assumptions
8. Key Properties --> Conservation
9. Grid --> Discretisation --> Horizontal
10. Grid --> Discretisation --> Vertical
11. Grid --> Seaice Categories
12. Grid --> Snow On Seaice
13. Dynamics
14. Thermodynamics --> Energy
15. Thermodynamics --> Mass
16. Thermodynamics --> Salt
17. Thermodynamics --> Salt --> Mass Transport
18. Thermodynamics --> Salt --> Thermodynamics
19. Thermodynamics --> Ice Thickness Distribution
20. Thermodynamics --> Ice Floe Size Distribution
21. Thermodynamics --> Melt Ponds
22. Thermodynamics --> Snow Processes
23. Radiative Processes
1. Key Properties --> Model
Name of seaice model used.
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of sea ice model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Variables
List of prognostic variable in the sea ice model.
2.1. Prognostic
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the sea ice component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Seawater Properties
Properties of seawater relevant to sea ice
3.1. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Ocean Freezing Point Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant seawater freezing point, specify this value.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Resolution
Resolution of the sea ice grid
4.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning applied to sea ice model component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Target
Is Required: TRUE Type: STRING Cardinality: 1.1
What was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Simulations
Is Required: TRUE Type: STRING Cardinality: 1.1
*Which simulations had tuning applied, e.g. all, not historical, only pi-control? *
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Metrics Used
Is Required: TRUE Type: STRING Cardinality: 1.1
List any observed metrics used in tuning model/parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.5. Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Which variables were changed during the tuning process?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Key Parameter Values
Values of key parameters
6.1. Typical Parameters
Is Required: FALSE Type: ENUM Cardinality: 0.N
What values were specificed for the following parameters if used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Additional Parameters
Is Required: FALSE Type: STRING Cardinality: 0.N
If you have any additional paramterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Assumptions
Assumptions made in the sea ice model
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.N
General overview description of any key assumptions made in this model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. On Diagnostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
Note any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Missing Processes
Is Required: TRUE Type: STRING Cardinality: 1.N
List any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation
Conservation in the sea ice component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Provide a general description of conservation methodology.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Properties
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in sea ice by the numerical schemes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
For each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.4. Was Flux Correction Used
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does conservation involved flux correction?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Corrected Conserved Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List any variables which are conserved by more than the numerical scheme alone.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Grid --> Discretisation --> Horizontal
Sea ice discretisation in the horizontal
9.1. Grid
Is Required: TRUE Type: ENUM Cardinality: 1.1
Grid on which sea ice is horizontal discretised?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.2. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the type of sea ice grid?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the advection scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.4. Thermodynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step in the sea ice model thermodynamic component in seconds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.5. Dynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step in the sea ice model dynamic component in seconds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional horizontal discretisation details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Grid --> Discretisation --> Vertical
Sea ice vertical properties
10.1. Layering
Is Required: TRUE Type: ENUM Cardinality: 1.N
What type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.2. Number Of Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using multi-layers specify how many.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional vertical grid details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 11. Grid --> Seaice Categories
What method is used to represent sea ice categories?
11.1. Has Mulitple Categories
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Set to true if the sea ice model has multiple sea ice categories.
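For example (illustrative value only; the real answer depends on the model being documented), a multi-category model would record:
```python
# Illustrative only -- replace with the documented model's actual configuration
DOC.set_value(True)
```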
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Number Of Categories
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using sea ice categories specify how many.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Category Limits
Is Required: TRUE Type: STRING Cardinality: 1.1
If using sea ice categories specify each of the category limits.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Ice Thickness Distribution Scheme
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the sea ice thickness distribution scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Other
Is Required: FALSE Type: STRING Cardinality: 0.1
If the sea ice model does not use sea ice categories, specify any additional details. For example, models that parameterise the ice thickness distribution (ITD), i.e. there is no explicit ITD, but fluxes are computed according to an assumed distribution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Grid --> Snow On Seaice
Snow on sea ice details
12.1. Has Snow On Ice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow on ice represented in this model?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12.2. Number Of Snow Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels of snow on ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Snow Fraction
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how the snow fraction on sea ice is determined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.4. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional details related to snow on ice.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamics
Sea Ice Dynamics
13.1. Horizontal Transport
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of horizontal advection of sea ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Transport In Thickness Space
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice transport in thickness space (i.e. in thickness categories)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Ice Strength Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Which method of sea ice strength formulation is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Redistribution
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which processes can redistribute sea ice (including thickness)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Rheology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Rheology, what is the ice deformation formulation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Thermodynamics --> Energy
Processes related to energy in sea ice thermodynamics
14.1. Enthalpy Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the energy formulation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.2. Thermal Conductivity
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of thermal conductivity is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.3. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of heat diffusion?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.4. Basal Heat Flux
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method by which basal ocean heat flux is handled?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.5. Fixed Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply the fixed salinity value for each sea ice layer.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.6. Heat Content Of Precipitation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which the heat content of precipitation is handled.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.7. Precipitation Effects On Salinity
Is Required: FALSE Type: STRING Cardinality: 0.1
If precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Thermodynamics --> Mass
Processes related to mass in sea ice thermodynamics
15.1. New Ice Formation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which new sea ice is formed in open water.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Ice Vertical Growth And Melt
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs the vertical growth and melt of sea ice.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Ice Lateral Melting
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice lateral melting?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.4. Ice Surface Sublimation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs sea ice surface sublimation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.5. Frazil Ice
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of frazil ice formation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 16. Thermodynamics --> Salt
Processes related to salt in sea ice thermodynamics.
16.1. Has Multiple Sea Ice Salinities
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the sea ice model use two different salinities: one for thermodynamic calculations and one for the salt budget?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 16.2. Sea Ice Salinity Thermal Impacts
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does sea ice salinity impact the thermal properties of sea ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Thermodynamics --> Salt --> Mass Transport
Mass transport of salt
17.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the mass transport of salt calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value, specify this value in PSU.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Thermodynamics --> Salt --> Thermodynamics
Salt thermodynamics
18.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the thermodynamic calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value, specify this value in PSU.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Thermodynamics --> Ice Thickness Distribution
Ice thickness distribution details.
19.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice thickness distribution represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Thermodynamics --> Ice Floe Size Distribution
Ice floe-size distribution details.
20.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice floe-size represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Please provide further details on any parameterisation of floe-size.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 21. Thermodynamics --> Melt Ponds
Characteristics of melt ponds.
21.1. Are Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are melt ponds included in the sea ice model?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21.2. Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What method of melt pond formulation is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21.3. Impacts
Is Required: TRUE Type: ENUM Cardinality: 1.N
What do melt ponds have an impact on?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22. Thermodynamics --> Snow Processes
Thermodynamic processes in snow on sea ice
22.1. Has Snow Aging
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has a snow aging scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Snow Aging Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow aging scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22.3. Has Snow Ice Formation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has snow ice formation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.4. Snow Ice Formation Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow ice formation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.5. Redistribution
Is Required: TRUE Type: STRING Cardinality: 1.1
What is the impact of ridging on snow cover?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.6. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the heat diffusion through snow methodology in sea ice thermodynamics?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiative Processes
Sea Ice Radiative Processes
23.1. Surface Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used to handle surface albedo.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Ice Radiation Transmission
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method by which solar radiation through sea ice is handled.
End of explanation
"""
|
chungjjang80/FRETBursts | notebooks/Example - FRET histogram fitting.ipynb | gpl-2.0 | from fretbursts import *
sns = init_notebook(apionly=True)
import lmfit
print('lmfit version:', lmfit.__version__)
# Tweak here matplotlib style
import matplotlib as mpl
mpl.rcParams['font.sans-serif'].insert(0, 'Arial')
mpl.rcParams['font.size'] = 12
%config InlineBackend.figure_format = 'retina'
"""
Explanation: Example - FRET histogram fitting
This notebook is part of the smFRET burst analysis software FRETBursts.
It shows how to fit a FRET histogram.
For a complete tutorial on burst analysis see
FRETBursts - us-ALEX smFRET burst analysis.
End of explanation
"""
url = 'http://files.figshare.com/2182601/0023uLRpitc_NTP_20dT_0.5GndCl.hdf5'
download_file(url, save_dir='./data')
full_fname = "./data/0023uLRpitc_NTP_20dT_0.5GndCl.hdf5"
d = loader.photon_hdf5(full_fname)
loader.alex_apply_period(d)
d.calc_bg(bg.exp_fit, time_s=1000, tail_min_us=(800, 4000, 1500, 1000, 3000))
d.burst_search(L=10, m=10, F=6)
ds = d.select_bursts(select_bursts.size, add_naa=True, th1=30)
"""
Explanation: Get and process data
End of explanation
"""
model = mfit.factory_three_gaussians()
model.print_param_hints()
"""
Explanation: Fitting the FRET histogram
We start defining the model. Here we choose a 3-Gaussian model:
End of explanation
"""
model.set_param_hint('p1_center', value=0.1, min=-0.1, max=0.3)
model.set_param_hint('p2_center', value=0.4, min=0.3, max=0.7)
model.set_param_hint('p2_sigma', value=0.04, min=0.02, max=0.18)
model.set_param_hint('p3_center', value=0.85, min=0.7, max=1.1)
"""
Explanation: The previous cell prints all the model parameters.
Each parameter has an initial value and bounds (min, max).
The column vary tells whether a parameter is varied during the fit
(if False the parameter is fixed).
Parameters with an expression (Expr column) are not free but
are computed as a function of other parameters.
We can modify the parameter constraints as follows:
End of explanation
"""
E_fitter = bext.bursts_fitter(ds, 'E', binwidth=0.03)
E_fitter.fit_histogram(model=model, pdf=False, method='nelder')
E_fitter.fit_histogram(model=model, pdf=False, method='leastsq')
dplot(ds, hist_fret, show_model=True, pdf=False);
# dplot(ds, hist_fret, show_model=True, pdf=False, figsize=(6, 4.5));
# plt.xlim(-0.1, 1.1)
# plt.savefig('fret_hist_fit.png', bbox_inches='tight', dpi=200, transparent=False)
"""
Explanation: Then, we fit and plot the model:
End of explanation
"""
res = E_fitter.fit_res[0]
res.params.pretty_print()
"""
Explanation: The results are in E_fitter:
End of explanation
"""
res.values
"""
Explanation: To get a dictionary of values:
End of explanation
"""
print(res.fit_report(min_correl=0.5))
"""
Explanation: This is the standard lmfit fit report:
End of explanation
"""
ci = res.conf_interval()
lmfit.report_ci(ci)
"""
Explanation: The previous cell reports error ranges computed from the covariance matrix.
More accurate confidence intervals
can be obtained with:
End of explanation
"""
E_fitter.params
"""
Explanation: Tidy fit results
It is convenient to put the fit results in a DataFrame for further analysis.
A DataFrame of fitted parameters is already available in E_fitter:
End of explanation
"""
import pybroom as br
df = br.tidy(res)
df
"""
Explanation: With pybroom we can get a "tidy" DataFrame
with more complete fit results:
End of explanation
"""
df.loc[df.name.str.contains('center')]
df.loc[df.name.str.contains('sigma')]
"""
Explanation: Now, for example, we can easily select parameters by name:
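As a further, purely illustrative step, the selected rows can be collapsed into a plain dictionary (e.g. peak positions keyed by parameter name) for downstream bookkeeping. A minimal sketch, assuming the tidy DataFrame `df` from above exposes `name` and `value` columns:
```python
# Sketch only: map each fitted Gaussian center to its fitted value
centers = {row['name']: row['value']
           for _, row in df.loc[df.name.str.contains('center')].iterrows()}
print(centers)
```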
End of explanation
"""
|
pxcandeias/py-notebooks | reliability.ipynb | mit | % matplotlib inline
import sys
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib as mpl
from scipy.stats import norm, lognorm
from scipy.integrate import quad
import matplotlib.pyplot as plt
print(sys.version)
for module in (np, sp, pd, mpl):
print('{:.<15}{}'.format(module.__name__, module.__version__))
"""
Explanation: <a id='top'></a>
Structural reliability analysis
In this notebook we will explore the intricacies of performing structural reliability analysis.
Table of contents
Preamble
Poisson distributions
Conclusions
References
Odds and ends
Computational lab setup
End of explanation
"""
capacity = norm(loc=1.5, scale=0.1) # capacity
demand = norm(loc=1.0, scale=0.1) # demand
x = np.linspace(demand.ppf(0.001), capacity.ppf(0.999), 100)
fig, ax = plt.subplots()
ax.plot(x, capacity.pdf(x), label='capacity', color='r')
ax.plot(x, demand.pdf(x), label='demand', color='g')
ax.axvline(capacity.stats(moments='m'), linestyle='--', color='r')
ax.axvline(demand.stats(moments='m'), linestyle='--', color='g')
ax.fill_between(x, 0., np.minimum(capacity.pdf(x),demand.pdf(x)))
ax.set_xlabel('x')
ax.set_ylabel('$\phi(x)$')
ax.legend()
"""
Explanation: Back to top
Basic reliability problem
Describe problem here!
Failure probability computation
$$PF = \int_{-\infty}^{+\infty}\phi_{demand}(x)\cdot\Phi_{capacity}(x)\;dx = \int_{-\infty}^{+\infty}\left[1-\Phi_{demand}(x)\right]\cdot\phi_{capacity}(x)\;dx$$
Back to top
First example: normally distributed random variables
Consider a very simple case where both capacity and demand (measured in terms of action-effects) are represented by independent random variables with a normal distribution.
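Before evaluating the integral numerically, a quick Monte Carlo cross-check is a useful sanity bound. This is an illustrative sketch (not part of the original workflow) that reuses the frozen `capacity` and `demand` distributions constructed in the code above:
```python
# Sketch: Monte Carlo estimate of the failure probability P(capacity < demand)
N = int(1e6)
pf_mc = np.mean(capacity.rvs(N) < demand.rvs(N))
print('Monte Carlo PF ~ {:1.6e}'.format(pf_mc))
```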
End of explanation
"""
def f1(x):
"""Objective function for numerical quadrature."""
return demand.pdf(x)*capacity.cdf(x)
val, err = quad(f1, demand.ppf(0.001), capacity.ppf(0.999))
print('Value = {:1.6e}, error = {:1.6e}'.format(val, err))
fig, ax = plt.subplots()
ax1 = ax.twinx() # secondary axis for capacity
ax.plot(x, demand.pdf(x), label='$\phi_{demand}(x)$', color='g')
ax1.plot(x, capacity.cdf(x), label='$\Phi_{capacity}(x)$', color='r')
ax.axvline(demand.stats(moments='m'), linestyle='--', color='g')
ax1.axvline(capacity.stats(moments='m'), linestyle='--', color='r')
ax1.plot((capacity.stats(moments='m'),x[-1]),(0.5, 0.5), linestyle='--', color='r')
ax.set_xlabel('x')
ax.set_ylabel('$\phi_{demand}(x)$', color='g')
ax.tick_params('y', colors='g')
ax1.set_ylabel('$\Phi_{capacity}(x)$', color='r')
ax1.tick_params('y', colors='r')
"""
Explanation: First numerical solution:
$$PF = \int_{-\infty}^{+\infty}\phi_{demand}(x)\cdot\Phi_{capacity}(x)\;dx$$
End of explanation
"""
def f2(x):
"""Objective function for numerical quadrature."""
# return (1.-demand.cdf(x))*capacity.pdf(x)
return demand.sf(x)*capacity.pdf(x) # sf is the survival function
val, err = quad(f2, demand.ppf(0.001), capacity.ppf(0.999))
print('Value = {:1.6e}, error = {:1.6e}'.format(val, err))
fig, ax = plt.subplots()
ax1 = ax.twinx() # secondary axis for capacity
ax.plot(x, 1.-demand.cdf(x), label='$1-\Phi_{demand}(x)$', color='g')
ax1.plot(x, capacity.pdf(x), label='$\phi_{capacity}(x)$', color='r')
ax.axvline(demand.stats(moments='m'), linestyle='--', color='g')
ax1.axvline(capacity.stats(moments='m'), linestyle='--', color='r')
ax.plot((x[0], demand.stats(moments='m')),(0.5, 0.5), linestyle='--', color='g')
ax.set_xlabel('x')
ax.set_ylabel('$1-\Phi_{demand}(x)$', color='g')
ax.tick_params('y', colors='g')
ax1.set_ylabel('$\phi_{capacity}(x)$',color='r')
ax1.tick_params('y', colors='r')
"""
Explanation: Second numerical solution:
$$PF = \int_{-\infty}^{+\infty}\left[1-\Phi_{demand}(x)\right]\cdot\phi_{capacity}(x)\;dx$$
End of explanation
"""
miu = capacity.mean()-demand.mean()
sigma = np.sqrt(capacity.std()**2+demand.std()**2)
print('miu = {:1.6e}, sigma = {:1.6e}'.format(miu, sigma))
beta = miu/sigma
print('beta = {:1.6}'.format(beta))
PF = norm.cdf(-beta) # probability of failure
print('Value = {:1.6e}'.format(PF))
"""
Explanation: For this first case there is a closed form solution for the failure probability ($PF$):
$$PF = \Phi(\frac{0-\mu}{\sigma}) = \Phi(-\beta)$$
where $\beta=\frac{\mu}{\sigma}$ is called the safety index.
See pages 47-48 of:
http://www.km.fgg.uni-lj.si/coste24/data/coimbradocuments/coimbra-faber.pdf
End of explanation
"""
capacity2 = lognorm(loc=1.5, s=0.1) # capacity
demand2 = lognorm(loc=1.0, s=0.1) # demand
x = np.linspace(demand2.ppf(0.001), capacity2.ppf(0.999), 100)
fig, ax = plt.subplots()
ax.plot(x, capacity2.pdf(x), label='capacity', color='r')
ax.plot(x, demand2.pdf(x), label='demand', color='g')
ax.axvline(capacity2.stats(moments='m'), linestyle='--', color='r')
ax.axvline(demand2.stats(moments='m'), linestyle='--', color='g')
ax.fill_between(x, 0., np.minimum(capacity2.pdf(x),demand2.pdf(x)))
ax.set_xlabel('x')
ax.set_ylabel('$\phi(x)$')
ax.legend()
"""
Explanation: Second example: lognormally distributed random variables
Now consider a similar problem but with lognormally distributed random variables:
End of explanation
"""
def f1(x):
"""Objective function for numerical quadrature."""
return demand2.pdf(x)*capacity2.cdf(x)
val, err = quad(f1, demand2.ppf(0.001), capacity2.ppf(0.999))
print('Value = {:1.6e}, error = {:1.6e}'.format(val, err))
fig, ax = plt.subplots()
ax1 = ax.twinx() # secondary axis for capacity
ax.plot(x, demand2.pdf(x), label='$\phi_{demand}(x)$', color='g')
ax1.plot(x, capacity2.cdf(x), label='$\Phi_{capacity}(x)$', color='r')
ax.axvline(demand2.stats(moments='m'), linestyle='--', color='g')
ax1.axvline(capacity2.stats(moments='m'), linestyle='--', color='r')
ax1.plot((capacity2.stats(moments='m'),x[-1]),(0.5, 0.5), linestyle='--', color='r')
ax.set_xlabel('x')
ax.set_ylabel('$\phi_{demand}(x)$', color='g')
ax.tick_params('y', colors='g')
ax1.set_ylabel('$\Phi_{capacity}(x)$', color='r')
ax1.tick_params('y', colors='r')
"""
Explanation: First numerical solution:
$$PF = \int_{-\infty}^{+\infty}\phi_{demand}(x)\cdot\Phi_{capacity}(x)\;dx$$
End of explanation
"""
def f2(x):
"""Objective function for numerical quadrature."""
# return (1.-demand2.cdf(x))*capacity2.pdf(x)
return demand2.sf(x)*capacity2.pdf(x) # sf is the survival function
val, err = quad(f2, demand2.ppf(0.001), capacity2.ppf(0.999))
print('Value = {:1.6e}, error = {:1.6e}'.format(val, err))
"""
Explanation: Second numerical solution:
$$PF = \int_{-\infty}^{+\infty}\left[1-\Phi_{demand}(x)\right]\cdot\phi_{capacity}(x)\;dx$$
End of explanation
"""
cov_C = capacity2.std()/capacity2.mean()
cov_D = demand2.std()/demand2.mean()
miu = np.log(capacity2.mean()/demand2.mean()*np.sqrt((1+cov_D**2)/(1+cov_C**2)))
sigma = np.sqrt(np.log((1+cov_D**2)*(1+cov_C**2)))
print('miu = {:1.6e}, sigma = {:1.6e}'.format(miu, sigma))
beta = miu/sigma
print('beta = {:1.6}'.format(beta))
PF = norm.cdf(-beta) # probability of failure
print('Value = {:1.6e}'.format(PF))
"""
Explanation: For this second case there is also a closed form solution for the failure probability ($PF$):
$$PF = \Phi\left(\frac{0-\log\left(\frac{\mu_C}{\mu_D}\sqrt{\frac{1+COV_D^2}{1+COV_C^2}}\right)}{\sqrt{\log\left[\left(1+COV_D^2\right)\left(1+COV_C^2\right)\right]}}\right) = \Phi(-\beta)$$
where $COV=\frac{\sigma}{\mu}$ is the coefficient of variation and $\beta$ (the mean-to-standard-deviation ratio of the log safety margin) is called the safety index.
See pages 47-48 of:
http://www.km.fgg.uni-lj.si/coste24/data/coimbradocuments/coimbra-faber.pdf
End of explanation
"""
mu_C = 15.
COV_C = 0.2
sigma_C = np.log1p(COV_C)
C = lognorm(sigma_C, loc=0, scale=mu_C) # capacity
print('C_median={}, C_mean={}'.format(C.ppf(0.5), C.stats(moments='m')))
mu_D = 10.
COV_D = 0.1
sigma_D = np.log1p(COV_D)
D = lognorm(sigma_D, loc=0, scale=mu_D) # demand
print('D_median={}, D_mean={}'.format(D.ppf(0.5), D.stats(moments='m')))
x = np.linspace(D.ppf(1e-12), C.ppf(0.9999), 100)
#x = np.linspace(0.01, 30, 1000)
fig, ax = plt.subplots()
ax.plot(x, C.pdf(x), label='$\phi_{C}(x)$', color='r')
ax.plot(x, D.cdf(x), label='$\Phi_{D}(x)$', color='g')
ax.axvline(mu_C, linestyle='--', color='r')
ax.axvline(mu_D, linestyle='--', color='g')
ax.grid(b=True)
ax.legend()
ax.set_xlabel('x')
ax.set_ylabel('$\Phi_D(x)/\phi_C(x)$')
def f1(x):
"""Objective function for numerical quadrature."""
return D.cdf(x)*C.pdf(x)
def f2(x):
"""Objective function for numerical quadrature."""
return D.pdf(x)*C.sf(x)
val, err = quad(f1, D.ppf(1e-12), C.ppf(0.9999))
print('Value = {:.3f}, error = {:1.6e}'.format(val, err))
val, err = quad(f2, D.ppf(0.001), C.ppf(0.999))
print('Value = {:.3f}, error = {:1.6e}'.format(val, err))
num = np.log((mu_C/np.sqrt(1.+COV_C**2))/(mu_D/np.sqrt(1.+COV_D**2)))
den = np.sqrt(np.log((1.+COV_D**2)*(1.+COV_C**2)))
R = 1.-norm.cdf(-num/den)
print(R)
"""
Explanation: Reliability index
To be done!
Example from:
http://www.springer.com/cda/content/document/cda_downloaddocument/9783319209456-c2.pdf%3FSGWID%3D0-0-45-1544683-p177543534&usd=2&usg=AFQjCNFO1huDK6zcCJeCqWsgSdD6EqkSMg
Definition of COV taken from:
https://en.wikipedia.org/wiki/Log-normal_distribution
End of explanation
"""
|
QuantEcon/QuantEcon.notebooks | rmt3_ch11.ipynb | bsd-3-clause | # imports and workspace setup
%matplotlib inline
from dolo import yaml_import, pcat
import dolo.algos.dtcscc.perfect_foresight as pf
from dolo.algos.dtcscc.steady_state import find_deterministic_equilibrium
from dolo.misc.graphs import plot_irfs
import numpy as np
import matplotlib.pyplot as plt
plt.style.use("ggplot")
"""
Explanation: Perfect foresight experiments with dolo
Fiscal policy experiments in a non-stochastic model
Spencer Lyon NYU Stern Economics
Pablo Winant Bank of England
February 2016
End of explanation
"""
url = "https://raw.githubusercontent.com/EconForge/dolo/master/examples/models/rmt3_ch11.yaml"
pcat(url)
"""
Explanation: In this notebook we will explore the effects of technology and fiscal policy shocks in a nonstochastic model. The model and figures are inspired by chapter 11 of Recursive Macroeconomic Theory 3rd edition (RMT3) by Ljungqvist and Sargent.
We will focus on the computational aspects of the exercises and will refer the interested reader to section 11.9 of RMT3 for a discussion on the underlying economics.
Model
A representative household has preferences over consumption that are ordered by
$$U := \sum_{t=0}^{\infty} \beta^t \frac{c_t^{1-\gamma}}{1-\gamma},$$
where $\beta = 0.95$ is the agent's discount factor and $\gamma$ is the coefficient of relative risk aversion.
A perfectly competitive representative firm rents capital $k_t$ from the household and produces using the production function $f(k) = A k^{\alpha}$. The aggregate technology in the economy is
$$g_t + c_t + k_{t+1} \le Ak_t^{\alpha} + (1-\delta) k_t,$$
where $g_t$ is government spending, $A$ is a constant productivity factor, and $\delta$ is the depreciation rate of capital.
The consumer faces two taxes: a consumption tax $\tau_{ct}$ and tax on earnings to capital $\tau_{kt}$. The household budget constraint is
$$ \sum_{t=0}^{\infty} q_t \left\{(1 + \tau_{ct}) c_t + [k_{t+1} - (1 - \delta) k_t] \right\} \le \sum_{t=0}^{\infty} q_t \left\{\eta_t k_t -\tau_{kt} (\eta_t - \delta) k_t + w_t\right\}.$$
Here $q_t$ is the time zero price of one unit of consumption in period $t$, $\eta_t$ is the pre-tax price households receive by lending capital to firms in time $t$, and $w_t$ is the time $t$ wage households earn on the inelastically supplied one unit of labor.
The government faces the budget constraint
$$ \sum_{t=0}^{\infty} q_t g_t \le \sum_{t=0}^{\infty} q_t \left\{\tau_{ct} c_t + \tau_{kt} (\eta_t - \delta) k_t \right\}$$
Equilibrium conditions
A competitive equilibrium is
a budget-feasible government policy $\left\{g_t, \tau_{ct}, \tau_{kt} \right\}_{t=0}^{\infty}$
a feasible allocation $\left\{c_t, k_{t+1}\right\}_{t=0}^{\infty}$
and prices $\left\{q_t \right\}_{t=0}^{\infty}$
such that
given prices and the government policy, the allocation solves the household and firm problems.
Firm optimization
The firm's problem is very simple:
$$\max_{k_t} \sum_{t=0}^{\infty} q_t \left[A k_t^{\alpha} - \eta_t k_t \right].$$
The zero-profit condition is $\eta_t = A \alpha k_t^{\alpha-1}$.
Household optimization
The problem of the household is to maximize $U$ subject to the budget constraint, taking prices and the government policy as given.
Assuming an interior solution and that the budget constraint holds with equality, the first order necessary condition of the household can be summarized by an Euler equation
$$1 = \beta \left(\frac{c_{t+1}}{c_t}\right)^{-\gamma} \frac{(1+\tau_{ct})}{(1+\tau_{ct+1})} \left[(1 - \tau_{kt+1})(\alpha A k_{t+1}^{\alpha-1} - \delta) + 1 \right]$$
and a law of motion for capital
$$k_{t+1} = A k_{t}^{\alpha} + (1 - \delta) k_t - g_t - c_t.$$
Prices and government policy
Imposing a no arbitrage condition on the household allows one to derive the following condition on prices (see RMT3 section 11.5.1 for more details):
$$\frac{q_t}{q_{t+1}} = \left[(1 - \tau_{kt+1})(\eta_{t+1} - \delta) + 1 \right].$$
After a normalization that $q_0 = 1$, the above equation pins down the sequence of prices.
In the experiments below we will assume that the government policy is exogenously given and solve for how the solutions to the household and firm problems adjust to shocks to the government policy.
Other equilibrium conditions
There are a few other variables of interest.
$$\begin{align}
\eta_t &= \alpha A k_t^{\alpha-1} \\
w_t &= A k_t^{\alpha} - k_t \eta_t \\
\bar{R}_{t+1} &= \frac{(1+\tau_{ct})}{(1+\tau_{ct+1})} \left[(1 - \tau_{kt+1})(\alpha A k_{t+1}^{\alpha-1} - \delta) + 1 \right]
\end{align}$$
Here $w_t$ is the wage rate the firm must pay to the household. Above we wrote the firm's problem with capital as the only input into production. All the equilibrium conditions above are still correct if we instead assume that the firm operates a constant returns to technology $F(k, n)$ and assume that the household inelastically supplies a single unit of labor and is paid a wage of $w_t$.
$\bar{R}_{t+1}$ is the rate at which the price and taxes allow the consumer to substitute consumption in period $t$ for consumption in period $t+1$.
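As a small worked example of how these conditions pin down the deterministic steady state: with constant taxes and constant $g$, the Euler equation reduces to $1 = \beta\left[(1-\tau_{k})(\alpha A \bar{k}^{\alpha-1}-\delta)+1\right]$, which can be inverted for the steady-state capital stock, and the law of motion then gives steady-state consumption. The sketch below uses placeholder parameter values (only $\beta=0.95$ and $g=0.2$ appear in the text; the actual calibration lives in the rmt3_ch11.yaml file used in this notebook):
```python
# Sketch only: steady state implied by the Euler equation and the law of motion.
# alpha, delta, A are illustrative placeholders, not the model's calibration.
beta, alpha, delta, A, tau_k, g = 0.95, 0.33, 0.05, 1.0, 0.0, 0.2
kbar = (alpha * A * (1 - tau_k) / (1 / beta - 1 + delta * (1 - tau_k))) ** (1 / (1 - alpha))
cbar = A * kbar**alpha - delta * kbar - g
print(kbar, cbar)
```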
Experiments
We will do a number of experiments and analyze the transition path for the equilibrium in each case:
A foreseen once-and-for-all increase in $g$ from 0.2 to 0.4 in period 10.
A foreseen once-and-for-all increase in $\tau_c$ from 0.0 to 0.2 in period 10.
A foreseen once-and-for-all increase in $\tau_k$ from 0.0 to 0.2 in period 10.
A foreseen one-time increase in $g$ from 0.2 to 0.4 in period 10, after which $g$ returns to 0.2 forever
Enter dolo
Let's perform these experiments with dolo.
This will allow us to cleanly separate the model definition from the code
used to exploit the results. General guidelines on how to write models are available
here.
Here's the dolo version of the model that we will be using.
End of explanation
"""
model = yaml_import(url)
model
"""
Explanation: At this stage, a few comments are in order. First, the model under consideration is set up in discrete time, and the optimization problem consists in choosing consumption (a continuous choice) as a function of the level of capital (a continuous state). Hence, in dolo's classification, it is referred to as a Discrete Time Continuous States Continuous Controls (DTCSCC) model. Several algorithms are available to solve that kind of model, all accessible from http://dolo.readthedocs.org/en/doc/algos_dtcscc.html.
The general formulation of a dtcscc model specifies a controlled process:
$$s_t = g(s_{t-1}, x_{t-1}, \epsilon_t)$$
where $s_t$ is a vector of states, $x_t$ a vector of controls taken at each state, and $\epsilon_t$ the driving exogenous process. This equation is defined in the equations:transition block.
For our model, there is essentially one state $k_t$ and one control $c_t$. Note that in this particular case choosing $c_t$ is equivalent to choosing $k_{t+1}$, but this is not true in general, for instance if there are many controls for one state.
Notice in the model file the addition of tau_c and tau_k as state variables. These dummy states track the innovations exog_tau_c, exog_tau_k. This was necessary in order to have tau_c in both period t and period t+1 in the Euler equation defined in the block equations:arbitrage whose conventional definition is:
$$0 = E_t \left[ f(s_t, x_t, s_{t+1}, x_{t+1}) \right]$$
In this note we are interested in the predictable effect of a preannounced government policy on the decisions by the representative agent, not in a time-invariant decision rule. Hence we will use the dolo.algos.dtcscc.perfect_foresight module, which we imported at the beginning of this notebook. This module implements the stacked time algorithm for solving deterministic problems. A description of this algorithm can be found here.
In a perfect foresight simulation, the computational cost of adding additional states is relatively low, because one does not need to solve for the optimal decisions at each point of the state space. For more information on which variables can appear at what times in each type of equation, see the dolo model classification .
Let's now import the model and display it. The display shows the residuals of each equation, evaluated at the values specified in the calibration section of the model file.
End of explanation
"""
shocks_1 = {"g": [0.2]*9+[0.4]}
shocks_2 = {"exog_tau_c": [0.0]*9+[0.2]}
shocks_3 = {"exog_tau_k": [0.0]*9+[0.2]}
shocks_4 = {"g": [0.2]*9+[0.4, 0.2]}
# also specify how long to simulate and plot
T = 101 # simulation length
p_T = 40; # Periods to plot
"""
Explanation: Now let's construct some dictionaries to hold the shocks in our experiments.
In the deterministic_solve function, simulations last for T periods. dolo assumes that if a given time-series of shocks is less than T in length, the corresponding shock will hold its last given value until period T. Thus, to implement the once-and-for-all increase in our exogenous variables we simply need to pass the values before the increase and a single instance of the value after the increase.
We do this below.
End of explanation
"""
# output is a pandas.DataFrame containing the equilibrium
# time series of each variable in the model
sol = pf.deterministic_solve(model, shocks=shocks_1,
T=T, ignore_constraints=True)
sol.head(10)
"""
Explanation: Experiment 1 (RMT 3 Figure 11.9.1)
Now we are ready to do the experiment corresponding to the once-and-for-all increase to $g$ in period $10$.
First, we solve for the equilibrium transition in this experiment
End of explanation
"""
# produce solution assuming no change in any shocks
# this will keep the model in the steady state the entire time
sol_ref = pf.deterministic_solve(model, T=T, ignore_constraints=True).ix[:p_T]
"""
Explanation: For comparison, we will also want to solve for the equilibrium path assuming no changes to any of the government variables ($g=0.2$, $\tau_c=0$, and $\tau_k=0$ forever).
End of explanation
"""
Rbar_formula = "(1+tau_c)/(1+tau_c(1))*((1-tau_k(1)) * (alpha*A*k(1)**(alpha-1) - delta) + 1.)"
sol["Rb"] = model.eval_formula(Rbar_formula, dataframe=sol)
sol_ref["Rb"] = model.eval_formula(Rbar_formula, dataframe=sol_ref)
# notice the new column
sol_ref.head()
"""
Explanation: We will want to show plots of $\bar{R}_{t,t+1}$, but notice that we did not include it in the yaml file above.
What we will do is use the eval_formula method on our model object, which will allow us to pass a string containing the dolo equation representing $\bar{R}_{t,t+1}$ and the time series received from the deterministic_solve function.
End of explanation
"""
# These commented out lines are how we could solve for the steady state numerically
# from dolo.algos.dtcscc.steady_state import find_deterministic_equilibrium
# ss = find_deterministic_equilibrium(model)
ss = model.calibration
kbar, cbar, etabar, wbar, gbar = ss["k", "c", "eta", "w", "g"]
Rbar = sol_ref.loc[0, "Rb"] # steady state is also in sol_ref
"""
Explanation: We will also want to show the steady state. We could solve for it numerically,
but since the model is simple enough, the calibrated values given with
the model already solve it in closed form.
End of explanation
"""
# Set up plotting materials
o = np.ones(p_T)
names = [["k", "c", "Rb"], ["eta", "g", "w"]]
titles = [["k", "c", r"$\bar{R}$"], [r"$\eta$", "g", "w"]]
ss_vals = [[kbar, cbar, Rbar], [etabar, gbar, wbar]]
# Generate plots
psol = sol.ix[:p_T]
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(8, 8))
for ind_i, i in enumerate(names):
for ind_k, k in enumerate(i):
ax_ik = axes[ind_i, ind_k]
psol[k].plot(ax=ax_ik, linewidth=2, title=titles[ind_i][ind_k])
ax_ik.plot(o * ss_vals[ind_i][ind_k], 'k--')
axes[1,1].set_ybound(.18, .42)
fig.suptitle('RMT4 Figure 11.9.1', fontsize=18, y=1.05)
fig.tight_layout()
"""
Explanation: Now that all the computations are done, we will work on constructing figure 11.9.1 in RMT3.
Note that we will first construct the plot by hand, which will require some effort on our part. Then below we will show how to leverage convenience functions in dolo to make that task easier.
End of explanation
"""
# construct dictionary of plot keyword arguments.
# We do this so we can re-use them below.
# line_options is applied to all lines in a particular DataFrame
plot_kwargs = dict(variables=['k','c','Rb','eta','g', 'w'],
line_options=[
{'color':'black', 'linestyle':'--'},
{'linewidth':2},
{'linewidth':2, "linestyle": "-."},
],
titles=['k', 'c', r'$\bar{R}$', r'$\eta$', 'g', 'w'])
fig = plot_irfs([sol_ref, psol], **plot_kwargs)
fig.suptitle('RMT4 Figure 11.9.1', fontsize=18, y=1.05)
fig.tight_layout()
"""
Explanation: Constructing that plot was a lot of book-keeping work.
Thankfully there is a convenient function in dolo that will do most of that work for us, while still allowing us to customize and tweak the figure to our liking. Here's another way we might have created the figure above.
End of explanation
"""
# Change gamma and compute solution again
model.set_calibration('gamma', 0.2)
sol_ies = pf.deterministic_solve(model, shocks=shocks_1, T=T, ignore_constraints=True)
sol_ies["Rb"] = model.eval_formula(Rbar_formula, dataframe=sol_ies)
psol_ies = sol_ies.ix[:p_T]
# generate figure
fig = plot_irfs([sol_ref, psol, psol_ies], **plot_kwargs)
fig.suptitle('RMT4 Figure 11.9.2', fontsize=18, y=1.05)
fig.tight_layout()
"""
Explanation: This was a much easier way to construct the same plot. We will use this approach going forward.
Change in IES (RMT3 Figure 11.9.2)
We now want to highlight the impact of the parameter $\gamma$ in the household's preferences. The intertemporal elasticity of substitution (IES) for these preferences is given by $1/\gamma$. This means that higher values of $\gamma$ decrease IES, making the household less willing to substitute consumption across time.
Let's perform the same experiment as above, but this time with $\gamma$ set to $0.2$ instead of $2.0$.
End of explanation
"""
# reset gamma
model.set_calibration("gamma", 2.0)
# helper function to make it easier to add variables
def add_variable(name, formula, dfs=[sol, sol_ref]):
for df in dfs:
df[name] = model.eval_formula(formula, dataframe=df)
sol["c_0"] = sol["c"][0]
sol_ref["c_0"] = sol_ref["c"][0]
add_variable("q", "beta**(t+1)*c**(-gamma)/(c_0**(-gamma))")
add_variable("qbar", "beta**t")
add_variable("R", "(1-tau_k(1)) * (alpha*A*k(1)**(alpha-1) - delta) + 1")
add_variable("r", "R-1")
# now construct yield curve at t=0, t=10, t=60
s = np.arange(p_T)
nans = np.ones(sol.shape[0]-p_T)*np.nan
q = np.array(sol["q"])
for t in [0, 10, 60]:
rs = np.log(q[s+t]/q[t]) /(-s)
# pad the array with NaN so we can add as a column to df
sol["rs_{0}".format(t)] = np.concatenate([rs, nans])
"""
Explanation: In the figure above the solid red line represents the experiment when $\gamma=2.0$, the dash-dotted blue line tracks the equilibrium in the experiment when $\gamma=0.2$, and the black dashed line tracks the steady state.
Notice that because the household is more willing to move consumption across time when $\gamma$ is smaller, the movement from initial to final levels of consumption is both larger and quicker. This lets the consumption stream "swallow" most of the impact of the increase in $g$ and capital doesn't respond as much (think about the household budget constraint). Factor prices and $\bar{R}$ are functions of the capital stock, so they also do not respond as sharply.
Prices and interest rates (RMT3 Figure 11.9.3)
We will now return to the case where $\gamma=2.0$ and take a closer look at prices and interest rates in this economy.
We say that $R_{t,t+1}$ is the gross one period interest rate between $t$ and $t+1$. In equilibrium it can be written:
$$R_{t,t+1}= \left[(1 - \tau_{kt+1})(\alpha A k_{t+1}^{\alpha-1} - \delta) + 1 \right]$$.
We define $r_{t,t+1} := R_{t,t+1} - 1$ as the corresponding net interest rate.
Finally we define $r_{t,t+s}$ to be the net $s$ period interest rate between periods $t$ and $t+s$. It is defined as
$$r_{t,t+s} = \frac{1}{s} \left(r_{t,t+1} + r_{t+1,t+2} + \cdots + r_{t+s-1,t+s} \right) = - \frac{1}{s} \log \left(\frac{q_{t+s}}{q_t}\right).$$
A plot of $r_{t,t+s}$ as $s$ increases is called the real yield curve at $t$. Below we will plot the yield curve at $t=0,10,60$.
Let's construct each of those objects now.
End of explanation
"""
# note, this is a little hack to make it so we can use the
# handy `plot_irf` functions to get the yield curve at t=0 and t=60
sol_ref["rs_0"] = sol["rs_60"]
# now construct figure
fig = plot_irfs([sol_ref, sol], variables=["c", "q", "r", "rs_0", "g"],
line_options=plot_kwargs["line_options"],
titles=["c", "q", r"$r_{t,t+1}$", r"$r_{t,t+s}$", "g"]);
# add in the t=10 yield curve and a title
sol["rs_10"][:p_T].plot(ax=fig.axes[3], **plot_kwargs["line_options"][2])
fig.suptitle('RMT4 Figure 11.9.3', fontsize=18, y=1.05);
fig.tight_layout()
"""
Explanation: Now we are ready to construct the plot we are after.
End of explanation
"""
sol2 = pf.deterministic_solve(model, shocks=shocks_2,
T=T, ignore_constraints=True)
sol2["Rb"] = model.eval_formula(Rbar_formula, dataframe=sol2)
psol2 = sol2.ix[:p_T]
fig = plot_irfs([sol_ref, psol2], variables=["k", "c", "Rb", "eta", "tau_c"],
titles=["k", "c", r"$\bar{R}$", r"$\eta$", r"$\tau_c$"],
line_options=plot_kwargs["line_options"])
fig.suptitle('RMT4 Figure 11.9.4', fontsize=18, y=1.05);
fig.tight_layout()
"""
Explanation: Consumption tax shocks (RMT 3 Figure 11.9.4)
Now let's consider the second experiment: a once-and-for-all increase in $\tau_c$ from 0 to 0.2 in period 10.
End of explanation
"""
sol3 = pf.deterministic_solve(model, shocks=shocks_3,
T=T, ignore_constraints=True)
sol3["Rb"] = model.eval_formula(Rbar_formula, dataframe=sol3)
psol3 = sol3.ix[:p_T]
# here we also want to look at the impact of changing the ies
model.set_calibration("gamma", 0.2)
sol3_ies = pf.deterministic_solve(model, shocks=shocks_3,
T=T, ignore_constraints=True)
sol3_ies["Rb"] = model.eval_formula(Rbar_formula, dataframe=sol3_ies)
psol3_ies = sol3_ies.ix[:p_T]
model.set_calibration("gamma", 2.0) # reset gamma
fig = plot_irfs([sol_ref, psol3, psol3_ies], variables=["k", "c", "Rb", "eta", "tau_k"],
titles=["k", "c", r"$\bar{R}$", r"$\eta$", r"$\tau_k$"],
line_options=plot_kwargs["line_options"])
fig.suptitle('RMT4 Figure 11.9.5', fontsize=18, y=1.05);
fig.tight_layout()
"""
Explanation: Capital tax shocks (RMT 3 Figure 11.9.5)
Next, we turn to the third experiment: a once-and-for-all increase in $\tau_k$ from 0 to 0.2 in period 10
End of explanation
"""
sol4 = pf.deterministic_solve(model, shocks=shocks_4,
T=T, ignore_constraints=True)
sol4["Rb"] = model.eval_formula(Rbar_formula, dataframe=sol4)
psol4 = sol4.ix[:p_T]
fig = plot_irfs([sol_ref, psol4], variables=["k", "c", "Rb", "eta", "g"],
titles=["k", "c", r"$\bar{R}$", r"$\eta$", "g"],
line_options=plot_kwargs["line_options"])
fig.suptitle('RMT4 Figure 11.9.6', fontsize=18, y=1.05)
fig.tight_layout()
"""
Explanation: Impulse shock to g (RMT 3 Figure 11.9.6)
Finally, we turn to the fourth experiment: a one-time shock to $g$ from 0.2 to 0.4 in period 10, then back to 0.2 forever.
End of explanation
"""
|
phoebe-project/phoebe2-docs | development/examples/legacy_spots.ipynb | gpl-3.0 | #!pip install -I "phoebe>=2.4,<2.5"
"""
Explanation: Comparing Spots in PHOEBE 2 vs PHOEBE Legacy
Setup
Let's first make sure we have the latest version of PHOEBE 2.4 installed (uncomment this line if running in an online notebook session such as colab).
End of explanation
"""
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
"""
Explanation: As always, let's do imports and initialize a logger and a new bundle.
End of explanation
"""
b.add_spot(component='primary', relteff=0.8, radius=20, colat=45, colon=90, feature='spot01')
b.add_dataset('lc', times=np.linspace(0,1,101))
b.add_compute('phoebe', irrad_method='none', compute='phoebe2')
b.add_compute('legacy', irrad_method='none', compute='phoebe1')
"""
Explanation: Adding Spots and Compute Options
End of explanation
"""
b.set_value_all('atm', 'extern_planckint')
b.set_value_all('ld_mode', 'manual')
b.set_value_all('ld_func', 'logarithmic')
b.set_value_all('ld_coeffs', [0.0, 0.0])
b.run_compute('phoebe2', model='phoebe2model')
b.run_compute('phoebe1', model='phoebe1model')
"""
Explanation: Let's use the external atmospheres available for both phoebe1 and phoebe2
End of explanation
"""
afig, mplfig = b.plot(legend=True, ylim=(1.95, 2.05), show=True)
"""
Explanation: Plotting
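Beyond the visual overlay, it can be useful to quantify how closely the two backends agree. The following is only a sketch (with a single light-curve dataset the `fluxes` twig should resolve uniquely; otherwise a `dataset=` qualifier would be needed):
```python
# Sketch: maximum absolute difference between the two synthetic light curves
f2 = b.get_value(qualifier='fluxes', model='phoebe2model', context='model')
f1 = b.get_value(qualifier='fluxes', model='phoebe1model', context='model')
print('max |flux difference|:', abs(f2 - f1).max())
```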
End of explanation
"""
|
cliburn/sta-663-2017 | scratch/Lecture10A.ipynb | mit | def in_unit_circle(x, y):
if x**2 + y**2 < 1:
return 1
else:
return 0
@numba.vectorize('int64(float64, float64)',target='cpu')
def in_unit_circle_serial(x, y):
if x**2 + y**2 < 1:
return 1
else:
return 0
@numba.vectorize('int64(float64, float64)',target='parallel')
def in_unit_circle_multicore(x, y):
if x**2 + y**2 < 1:
return 1
else:
return 0
n = int(1e7)
xs, ys = np.random.random((2, n))
%%time
4 * np.sum(in_unit_circle(x, y) for x, y in zip(xs, ys))/n
%%time
4 * np.sum(in_unit_circle_serial(xs, ys))/n
%%time
4 * np.sum(in_unit_circle_multicore(xs, ys))/n
"""
Explanation: numba.vectorize
The cells above compare a plain Python predicate with serial (target='cpu') and parallel (target='parallel') numba-vectorized versions, used for a Monte Carlo estimate of π.
End of explanation
"""
def plot_one(data, name):
xs, ys = data.T
plt.scatter(xs, ys, s=1, edgecolor=None)
plt.savefig('%s.png' % name)
return name
data = np.random.random((10, 10000, 2))
"""
Explanation: Multi-core processing
End of explanation
"""
%%time
for i, M in enumerate(data):
plot_one(M, i)
"""
Explanation: Single core
End of explanation
"""
%%time
args = [(x, i) for i, x in enumerate(data)]
with mp.Pool() as pool:
pool.starmap(plot_one, args)
%%time
args = [(x, i) for i, x in enumerate(data)]
with mp.Pool() as pool:
results = pool.starmap_async(plot_one, args)
"""
Explanation: Threads
```python
%%time
args = [(x, i) for i, x in enumerate(data)]
def plot_one_(arg):
return plot_one(*arg)
with ThreadPoolExecutor() as pool:
pool.map(plot_one_, args)
```
Processes
End of explanation
"""
%%time
Parallel(n_jobs=-1)(delayed(plot_one)(x, i) for i, x in enumerate(data))
pass
"""
Explanation: Parallel comprehensions with joblib
End of explanation
"""
def f(x):
import time
time.sleep(np.random.randint(0, 5))
return x
%%time
with mp.Pool(processes=4) as pool:
result = pool.map(f, range(10))
result
%%time
pool = mp.Pool(processes=4)
result = pool.map_async(f, range(10))
if result.ready() and result.successful():
print(result.get())
else:
print(result.wait())
"""
Explanation: Blocking and non-blocking calls
End of explanation
"""
|
psychemedia/parlihacks | notebooks/Co-Occurring Tag Analysis.ipynb | mit | #Data files
!ls ../data/dataexport
"""
Explanation: Co-Occurring Tag Analysis
Analysing how tags co-occur across various Parliamentary publications. The idea behind this is to see whether there are naturally occurring groupings of topic tags by virtue of their co-occurrence when used to tag different classes of Parliamentary publication.
The data is provided as a set of Linked Data triples exported as Turtle (.ttl) data files. The data represents, among other things, Parliamentary resources (such as early day motions or other proceedings records) and subject/topic labels they are tagged with.
The data allows us to generate a graph that associates tags with resources, and from that a graph that directly associates tags with other tags by virtue of their commonly tagging the same resource or set of resources.
End of explanation
"""
#Data is provided as Turtle/ttl files - rdflib handles those
#!pip3 install rdflib
from rdflib import Graph
"""
Explanation: Utils
Import a library that lets us work with the data files:
End of explanation
"""
import os
def ttl_graphbuilder(path,g=None,debug=False):
#We can add the triples to an existing graph or create a new one for them
if g is None:
g=Graph()
#Loop through all the files in the directory and then load the ones that have a .ttl suffix
for ttl in [f for f in os.listdir(path) if f.endswith('.ttl')]:
if debug: print(ttl)
g.parse('{}/{}'.format(path,ttl), format='turtle')
return g
"""
Explanation: Simple utility to load all the .ttl files in a particular directory into a graph:
End of explanation
"""
def rdfQuery(graph,q):
ans=graph.query(q)
for row in ans:
for el in row:
print(el,end=" ")
print()
#ish via https://github.com/schemaorg/schemaorg/blob/sdo-callisto/scripts/dashboard.ipynb
import pandas as pd
def sparql2df(graph,q, cast_to_numeric=True):
a=graph.query(q)
c = []
for b in a.bindings:
rowvals=[]
for k in a.vars:
rowvals.append(b[k])
c.append(rowvals)
df = pd.DataFrame(c)
df.columns = [str(v) for v in a.vars]
if cast_to_numeric:
df = df.apply(lambda x: pd.to_numeric(x, errors='ignore'))
return df
"""
Explanation: Tools for running queries over a graph and either printing the result or putting it into a pandas dataframe:
End of explanation
"""
import networkx as nx
"""
Explanation: Tools to support the export and display of graphs - networkx package is handy in this respect, eg exporting to GEXF format for use with Gephi. We can also run projections on the graph quite easily.
End of explanation
"""
path='../data/dataexport/terms'
termgraph=ttl_graphbuilder(path)
#What's in the graph generally?
q='''
SELECT DISTINCT ?x ?y ?z {
?x ?y ?z.
} LIMIT 10
'''
rdfQuery(termgraph,q)
#What does a term have associated with it more specifically?
q='''
SELECT DISTINCT ?y ?z {
<http://data.parliament.uk/terms/95551> ?y ?z.
} LIMIT 10
'''
rdfQuery(termgraph,q)
"""
Explanation: Exploring the Data - Terms
End of explanation
"""
q='''
SELECT DISTINCT ?z ?topic {
?z <http://www.w3.org/2004/02/skos/core#prefLabel> ?topic.
} LIMIT 10
'''
sparql2df(termgraph,q)
"""
Explanation: Looks like the prefLabel is what we want:
End of explanation
"""
path='../data/dataexport/edms'
g=ttl_graphbuilder(path)
#See what's there generally...
q='''
SELECT DISTINCT ?x ?y ?z {
?x ?y ?z.
} LIMIT 10
'''
rdfQuery(g,q)
#Explore a specific EDM
q='''
SELECT DISTINCT ?y ?z {
<http://data.parliament.uk/edms/50457> ?y ?z.
}
'''
rdfQuery(g,q)
"""
Explanation: Exploring the Data - EDMS
End of explanation
"""
path='../data/dataexport/edms'
g=ttl_graphbuilder(path,termgraph)
"""
Explanation: Let's merge the EDM graph data with the terms data.
End of explanation
"""
q='''
SELECT DISTINCT ?t ?z {
<http://data.parliament.uk/edms/50114> <http://data.parliament.uk/schema/parl#topic> ?z.
?z <http://www.w3.org/2004/02/skos/core#prefLabel> ?t.
} LIMIT 10
'''
rdfQuery(g,q)
"""
Explanation: Now we can look at the term labels associated with a particular EDM.
End of explanation
"""
q='''
SELECT DISTINCT ?edms ?topic {
?edms <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://data.parliament.uk/schema/parl#EarlyDayMotion>.
?edms <http://data.parliament.uk/schema/parl#topic> ?z.
?z <http://www.w3.org/2004/02/skos/core#prefLabel> ?topic.
}
'''
g_df=sparql2df(g,q)
g_df.head()
"""
Explanation: We can also create a table that links topic labels with EDMs.
End of explanation
"""
nxg=nx.from_pandas_dataframe(g_df, 'edms', 'topic')
#nx.write_gexf(nxg,'edms.gexf')
"""
Explanation: From this table, we can a generate a bipartite networkx graph that links topic labels with EDMs.
End of explanation
"""
from networkx.algorithms import bipartite
#We can find the sets of names/tags associated with the disjoint sets in the graph
#I think the directedness of the graph means we can be reasonably sure the variable names are correctly ordered?
edms,topic=bipartite.sets(nxg)
#Collapse the bipartite graph to a graph of topic labels connected via a common EDM
topicgraph= bipartite.projected_graph(nxg, topic)
nx.write_gexf(topicgraph,'edms_topics.gexf')
"""
Explanation: We can then project this bipartite graph onto just the topic label nodes - edges will now connect nodes that are linked through one or more common EDMs.
End of explanation
"""
topicgraph_weighted= bipartite.weighted_projected_graph(nxg, topic)
nx.write_gexf(topicgraph_weighted,'edms_topics_weighted.gexf')
"""
Explanation: We can also generate a weighted graph, where edges are weighted relative to how many times topics are linked through different EDMs.
End of explanation
"""
#!pip3 install sklearn
#via https://stackoverflow.com/a/19172087/454773
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
#via https://stackoverflow.com/questions/22219004/grouping-rows-in-list-in-pandas-groupby
g_df['topic']=g_df['topic'].astype(str)
topicsbyedm_df=g_df.groupby('edms')['topic'].apply(list).to_frame().reset_index()
topicsbyedm_df.head()
q='''
SELECT DISTINCT ?edms ?motiontext {
?edms <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://data.parliament.uk/schema/parl#EarlyDayMotion>.
?edms <http://data.parliament.uk/schema/parl#motionText> ?motiontext.
}
'''
m_df=sparql2df(g,q)
m_df=m_df.merge(topicsbyedm_df,on='edms')
m_df.head()
X_train= np.array(m_df['motiontext'][:-100].tolist())
X_test = np.array(m_df['motiontext'][-100:].tolist())
target_names=g_df['topic'].astype(str).tolist()
target_names[:3]
#ytrain= [[target_names.index(i) for i in t] for t in m_df['topic'][:-100] ]
#ytrain[:3]
y_train_text = [ t for t in m_df['topic'][:-100] ]
y_train_text[:3]
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(y_train_text)
classifier = Pipeline([
('vectorizer', CountVectorizer(analyzer='word',stop_words='english')),
('tfidf', TfidfTransformer()),
('clf', OneVsRestClassifier(LinearSVC()))])
classifier.fit(X_train, Y)
predicted = classifier.predict(X_test)
all_labels = mlb.inverse_transform(predicted)
hits=[]
misses=[]
for item, labels in zip(X_test, all_labels):
if labels!=(): hits.append('{0} => {1}'.format(item, ', '.join(labels)))
else: misses.append('{0} => {1}'.format(item, ', '.join(labels)))
print("some hits:\n{}\n\nsome misses:\n{}".format('\n'.join(hits[:3]),'\n'.join(misses[:3])))
labels
"""
Explanation: Predicting Topics
End of explanation
"""
path='../data/dataexport/proceedings'
p=ttl_graphbuilder(path,debug=True)
!ls {path}
!cat {path}/0006D323-D0B5-4E22-A26E-75ABB621F58E.ttl
"""
Explanation: Exploring the Data - proceedings
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive/03_tensorflow/labs/d_traineval.ipynb | apache-2.0 | !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.5
from google.cloud import bigquery
import tensorflow as tf
import numpy as np
import shutil
print(tf.__version__)
"""
Explanation: <h1> 2d. Distributed training and monitoring </h1>
In this notebook, we refactor to call train_and_evaluate instead of hand-coding our ML pipeline. This allows us to carry out evaluation as part of our training loop instead of as a separate step. It also adds in failure-handling that is necessary for distributed training capabilities.
End of explanation
"""
CSV_COLUMNS = ['fare_amount', 'pickuplon','pickuplat','dropofflon','dropofflat','passengers', 'key']
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0], [-74.0], [40.0], [-74.0], [40.7], [1.0], ['nokey']]
def read_dataset(filename, mode, batch_size = 512):
def decode_csv(value_column):
columns = tf.compat.v1.decode_csv(value_column, record_defaults = DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Create list of file names that match "glob" pattern (i.e. data_file_*.csv)
filenames_dataset = tf.data.Dataset.list_files(filename)
# Read lines from text files
textlines_dataset = filenames_dataset.flat_map(tf.data.TextLineDataset)
# Parse text lines as comma-separated values (CSV)
dataset = textlines_dataset.map(decode_csv)
# Note:
# use tf.data.Dataset.flat_map to apply one to many transformations (here: filename -> text lines)
# use tf.data.Dataset.map to apply one to one transformations (here: text line -> feature list)
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset
"""
Explanation: <h2> Input </h2>
Read data created in Lab1a, but this time make it more general, so that we are reading in batches. Instead of using Pandas, we will use the tf.data API to read the CSV files in batches directly inside the TensorFlow graph.
End of explanation
"""
INPUT_COLUMNS = [
tf.feature_column.numeric_column('pickuplon'),
tf.feature_column.numeric_column('pickuplat'),
tf.feature_column.numeric_column('dropofflat'),
tf.feature_column.numeric_column('dropofflon'),
tf.feature_column.numeric_column('passengers'),
]
def add_more_features(feats):
# Nothing to add (yet!)
return feats
feature_cols = add_more_features(INPUT_COLUMNS)
"""
Explanation: <h2> Create features out of input data </h2>
For now, pass these through. (same as previous lab)
End of explanation
"""
## TODO: Create serving input function
def serving_input_fn():
#ADD CODE HERE
return tf.estimator.export.ServingInputReceiver(features, json_feature_placeholders)
"""
Explanation: <h2> Serving input function </h2>
Defines the expected shape of the JSON feed that the model will receive once deployed behind a REST API in production.
End of explanation
"""
## TODO: Create train and evaluate function using tf.estimator
def train_and_evaluate(output_dir, num_train_steps):
#ADD CODE HERE
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
"""
Explanation: <h2> tf.estimator.train_and_evaluate </h2>
End of explanation
"""
OUTDIR = './taxi_trained'
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
tf.compat.v1.summary.FileWriterCache.clear()
train_and_evaluate(OUTDIR, num_train_steps = 2000)
"""
Explanation: <h2>Run training</h2>
End of explanation
"""
|
lilleswing/deepchem | examples/tutorials/04_Molecular_Fingerprints.ipynb | mit | !curl -Lo conda_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py
import conda_installer
conda_installer.install()
!/root/miniconda/bin/conda info -e
!pip install --pre deepchem
"""
Explanation: Tutorial 4: Molecular Fingerprints
Molecules can be represented in many ways. This tutorial introduces a type of representation called a "molecular fingerprint". It is a very simple representation that often works well for small drug-like molecules.
Colab
This tutorial and the rest in this sequence can be done in Google colab. If you'd like to open this notebook in colab, you can use the following link.
Setup
To run DeepChem within Colab, you'll need to run the following installation commands. This will take about 5 minutes to run to completion and install your environment. You can of course run this tutorial locally if you prefer. In that case, don't run these cells since they will download and install Anaconda on your local machine.
End of explanation
"""
import deepchem as dc
dc.__version__
"""
Explanation: We can now import the deepchem package to play with.
End of explanation
"""
tasks, datasets, transformers = dc.molnet.load_tox21(featurizer='ECFP')
train_dataset, valid_dataset, test_dataset = datasets
print(train_dataset)
"""
Explanation: What is a Fingerprint?
Deep learning models almost always take arrays of numbers as their inputs. If we want to process molecules with them, we somehow need to represent each molecule as one or more arrays of numbers.
Many (but not all) types of models require their inputs to have a fixed size. This can be a challenge for molecules, since different molecules have different numbers of atoms. If we want to use these types of models, we somehow need to represent variable sized molecules with fixed sized arrays.
Fingerprints are designed to address these problems. A fingerprint is a fixed length array, where different elements indicate the presence of different features in the molecule. If two molecules have similar fingerprints, that indicates they contain many of the same features, and therefore will likely have similar chemistry.
DeepChem supports a particular type of fingerprint called an "Extended Connectivity Fingerprint", or "ECFP" for short. They also are sometimes called "circular fingerprints". The ECFP algorithm begins by classifying atoms based only on their direct properties and bonds. Each unique pattern is a feature. For example, "carbon atom bonded to two hydrogens and two heavy atoms" would be a feature, and a particular element of the fingerprint is set to 1 for any molecule that contains that feature. It then iteratively identifies new features by looking at larger circular neighborhoods. One specific feature bonded to two other specific features becomes a higher level feature, and the corresponding element is set for any molecule that contains it. This continues for a fixed number of iterations, most often two.
Let's take a look at a dataset that has been featurized with ECFP.
End of explanation
"""
train_dataset.w
"""
Explanation: The feature array X has shape (6264, 1024). That means there are 6264 samples in the training set. Each one is represented by a fingerprint of length 1024. Also notice that the label array y has shape (6264, 12): this is a multitask dataset. Tox21 contains information about the toxicity of molecules. 12 different assays were used to look for signs of toxicity. The dataset records the results of all 12 assays, each as a different task.
Let's also take a look at the weights array.
End of explanation
"""
model = dc.models.MultitaskClassifier(n_tasks=12, n_features=1024, layer_sizes=[1000])
"""
Explanation: Notice that some elements are 0. The weights are being used to indicate missing data. Not all assays were actually performed on every molecule. Setting the weight for a sample or sample/task pair to 0 causes it to be ignored during fitting and evaluation. It will have no effect on the loss function or other metrics.
Most of the other weights are close to 1, but not exactly 1. This is done to balance the overall weight of positive and negative samples on each task. When training the model, we want each of the 12 tasks to contribute equally, and on each task we want to put equal weight on positive and negative samples. Otherwise, the model might just learn that most of the training samples are non-toxic, and therefore become biased toward identifying other molecules as non-toxic.
Training a Model on Fingerprints
Let's train a model. In earlier tutorials we use GraphConvModel, which is a fairly complicated architecture that takes a complex set of inputs. Because fingerprints are so simple, just a single fixed length array, we can use a much simpler type of model.
End of explanation
"""
import numpy as np
model.fit(train_dataset, nb_epoch=10)
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
print('training set score:', model.evaluate(train_dataset, [metric], transformers))
print('test set score:', model.evaluate(test_dataset, [metric], transformers))
"""
Explanation: MultitaskClassifier is a simple stack of fully connected layers. In this example we tell it to use a single hidden layer of width 1000. We also tell it that each input will have 1024 features, and that it should produce predictions for 12 different tasks.
Why not train a separate model for each task? We could do that, but it turns out that training a single model for multiple tasks often works better. We will see an example of that in a later tutorial.
Let's train and evaluate the model.
End of explanation
"""
|
cathalmccabe/PYNQ | boards/Pynq-Z2/logictools/notebooks/fsm_generator.ipynb | bsd-3-clause | from pynq.overlays.logictools import LogicToolsOverlay
logictools_olay = LogicToolsOverlay('logictools.bit')
"""
Explanation: Finite State Machine Generator
This notebook will show how to use the Finite State Machine (FSM) Generator to generate a state machine. The FSM we will build is a Gray code counter. The counter has three state bits and can count up or down through eight states. The counter outputs are Gray coded, meaning that there is only a single-bit transition between the output vector of any state and its next states.
Step 1: Download the logictools overlay
End of explanation
"""
fsm_spec = {'inputs': [('reset','D0'), ('direction','D1')],
'outputs': [('bit2','D3'), ('bit1','D4'), ('bit0','D5')],
'states': ['S0', 'S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7'],
'transitions': [['01', 'S0', 'S1', '000'],
['00', 'S0', 'S7', '000'],
['01', 'S1', 'S2', '001'],
['00', 'S1', 'S0', '001'],
['01', 'S2', 'S3', '011'],
['00', 'S2', 'S1', '011'],
['01', 'S3', 'S4', '010'],
['00', 'S3', 'S2', '010'],
['01', 'S4', 'S5', '110'],
['00', 'S4', 'S3', '110'],
['01', 'S5', 'S6', '111'],
['00', 'S5', 'S4', '111'],
['01', 'S6', 'S7', '101'],
['00', 'S6', 'S5', '101'],
['01', 'S7', 'S0', '100'],
['00', 'S7', 'S6', '100'],
['1-', '*', 'S0', '']]}
"""
Explanation: Step 2: Specify the FSM
End of explanation
"""
fsm_generator = logictools_olay.fsm_generator
"""
Explanation: Notes on the FSM specification format
Step 3: Instantiate the FSM generator object
End of explanation
"""
fsm_generator.trace()
"""
Explanation: Setup to use trace analyzer
In this notebook, the trace analyzer is used to capture and check the inputs and outputs of the FSM.
Users can choose whether to use the trace analyzer by calling the trace() method.
End of explanation
"""
fsm_generator.setup(fsm_spec)
"""
Explanation: Step 5: Setup the FSM generator
The FSM generator will work at the default frequency of 10MHz. This can be modified using a frequency argument in the setup() method.
End of explanation
"""
fsm_generator.show_state_diagram()
"""
Explanation: Display the FSM state diagram
This method should only be called after the generator has been properly set up.
End of explanation
"""
fsm_generator.run()
fsm_generator.show_waveform()
"""
Explanation: Set up the FSM inputs on the PYNQ board
* Check that the reset and direction inputs are correctly wired on the PYNQ board, as shown below:
* Connect D0 to GND
* Connect D1 to 3.3V
Notes:
The 3-bit Gray code counter is an up-down, wrap-around counter that will count from states 000 to 100 in either ascending or descending order
The reset input is connected to pin D0 of the Arduino connector
Connect the reset input to GND for normal operation
When the reset input is set to logic 1 (3.3V), the counter resets to state 000
The direction input is connected to pin D1 of the Arduino connector
When the direction is set to logic 0, the counter counts down
Conversely, when the direction input is set to logic 1, the counter counts up
Step 6: Run and display waveform
The run() method will execute all the samples, show_waveform() method is used to display the waveforms
End of explanation
"""
fsm_generator.stop()
"""
Explanation: Verify the trace output against the expected Gray code count sequence
| State | FSM output bits: bit2, bit1, bit0 |
|:-----:|:----------------------------------------:|
| s0 | 000 |
| s1 | 001 |
| s2 | 011 |
| s3 | 010 |
| s4 | 110 |
| s5 | 111 |
| s6 | 101 |
| s7 | 100 |
Step 7: Stop the FSM generator
Calling stop() will clear the logic values on output pins; however, the waveform will be recorded locally in the FSM instance.
End of explanation
"""
|
Upward-Spiral-Science/grelliam | code/inferential_simulation.ipynb | apache-2.0 | import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import itertools
import os
import csv
import igraph as ig
%matplotlib inline
font = {'weight' : 'bold',
'size' : 14}
import matplotlib
matplotlib.rc('font', **font)
np.random.seed(123456789) # for reproducibility, set random seed
alpha = 0.05 # define alpha
r = 20 # define number of rois
N = 100 # number of samples at each iteration
# define number of subjects per class
S = np.array((4, 6, 8, 10, 14, 18, 20, 26, 30, 40,
50, 60, 70, 80, 100, 120, 150, 200, 250,
300, 400, 500, 750, 1000, 1500, 2000,
3000, 5000))
"""
Explanation: Formal Simulated Inference
Define F (i.e. your model and assumptions)
Formalize test
Describe test statistic
Sample data from $F \in \mathscr{F}_0$
Sample data from $F \in \mathscr{F}_A$
Plot power vs n (i.e. perspective power analysis)
Plot power vs n under the null (i.e. prospective power analysis)
Plot power vs n under the alternative (i.e. prospective power analysis)
Step 1: Define model and assumptions
Model
$G_i, Y_i \sim \mathscr{F}_{G,Y} = \{ F_{G,Y}(\cdot; \theta) : \theta \in \Theta \}$.
Since, all samples observed are graph matched (i.e. nodes are equal across graphs), we can look at just the distribution of adjacency matrices:
$F_{G,Y} = F_{X,Y}$.
Thus,
$X_i = \prod_{u,v}^{\mathcal{E}} A_{uv}$, where $\mathcal{E} \subset V \times V$ <br/>
$Y_i \in \{0,1\}$
Assumption
$F_{X|0} = ER(p_0) = Bern(p_0)^{V \times V}$ <br/>
$F_{X|1} = ER(p_1) = Bern(p_1)^{V \times V}$
Step 2: Formalize test
(note: always start as simply as reasonable, but no simpler)
$H_0: p_0 = p_1 $<br/>
$H_A: p_0 \neq p_1$
$\alpha$, the significance level, indicates the power we expect under the null distribution as $n \to \infty$ (i.e. the false positive rate we are willing to tolerate)
Step 3: Describe test statistic
$\hat{p_i} = \frac{\sum X}{| V \times V |}$
Intermediate Step
End of explanation
"""
pow_null = np.array((), dtype=np.dtype('float64'))
# compute this statistic for various sizes of datasets
for s in S:
s0 = s/2
s1 = s - s0
# compute this many times for each operating point to get average
pval = np.array((), dtype=np.dtype('float64'))
for _ in itertools.repeat(None,N):
g0 = 1 * (np.random.rand( r, r, s0) > 0.5) # (null), 0.52 (classes)
g1 = 1 * (np.random.rand( r, r, s1) > 0.5) # (null), 0.48 (classes)
# compute feature of data
pbar0 = 1.0*np.sum(g0, axis=(0,1))/( r**2 * s0)
pbar1 = 1.0*np.sum(g1, axis=(0,1))/( r**2 * s1)
        # compute Wilcoxon signed-rank test on feature
pval = np.append(pval, stats.wilcoxon(pbar0, pbar1)[1])
    # record the fraction of rejections (empirical power) at this operating point
pow_null = np.append(pow_null, np.sum(1.0*(pval < alpha))/N)
"""
Explanation: Step 4A: Sample data from null
End of explanation
"""
pow_alt = np.array((), dtype=np.dtype('float64'))
# compute this statistic for various sizes of datasets
for s in S:
s0 = s/2
s1 = s - s0
# compute this many times for each operating point to get average
pval = np.array((), dtype=np.dtype('float64'))
for _ in itertools.repeat(None,N):
g0 = 1 * (np.random.rand( r, r, s0) > 0.52) # (null), 0.52 (classes)
g1 = 1 * (np.random.rand( r, r, s1) > 0.48) # (null), 0.48 (classes)
# compute feature of data
pbar0 = 1.0*np.sum(g0, axis=(0,1))/( r**2 * s0)
        pbar1 = 1.0*np.sum(g1, axis=(0,1))/( r**2 * s1)
        # compute Wilcoxon signed-rank test on feature
pval = np.append(pval, stats.wilcoxon(pbar0, pbar1)[1])
    # record the fraction of rejections (empirical power) at this operating point
pow_alt = np.append(pow_alt, np.sum(1.0*(pval < alpha))/N)
"""
Explanation: Step 4B: Sample data from alternate
End of explanation
"""
plt.figure(figsize=(8, 5))
plt.scatter(S, pow_null, hold=True, label='null')
plt.scatter(S, pow_alt, color='green', hold=True, label='alt')
plt.xscale('log')
plt.xlabel('Number of Samples')
plt.xlim((0,10000))
plt.ylim((-0.05, 1.05))
plt.ylabel('Power')
plt.title('Strength of Gender Classification Using Wilcoxon Test')
plt.axhline(alpha, color='red', linestyle='--', label='alpha')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('../figs/wilcoxon_classification.png')
plt.show()
"""
Explanation: Step 5: Plot power vs n on null set
End of explanation
"""
# Initializing dataset names
dnames = list(['../data/desikan/KKI2009'])
print "Dataset: " + ", ".join(dnames)
# Getting graph names
fs = list()
for dd in dnames:
fs.extend([root+'/'+file for root, dir, files in os.walk(dd) for file in files])
fs = fs[:]
def loadGraphs(filenames, rois, printer=False):
A = np.zeros((rois, rois, len(filenames)))
for idx, files in enumerate(filenames):
if printer:
print "Loading: " + files
g = ig.Graph.Read_GraphML(files)
tempg = g.get_adjacency(attribute='weight')
A[:,:,idx] = np.asarray(tempg.data)
return A
# Load X
X = loadGraphs(fs, 70)
print X.shape
# Load Y
ys = csv.reader(open('../data/kki42_subjectinformation.csv'))
y = [y[5] for y in ys]
y = y[1:]
g_m = np.zeros((70, 70, sum([1 if x=='M' else 0 for x in y])))
g_f = np.zeros((70, 70, sum([1 if x=='F' else 0 for x in y])))
cf=0
cm=0
for idx, val in enumerate(y):
if val == 'M':
g_m[:,:,cm] = X[:,:,idx]
cm += 1
else:
g_f[:,:,cf] = X[:,:,idx]
cf +=1
print g_f.shape
print g_m.shape
# compute feature of data
p_f = 1.0*np.sum(1.0*(g_f>0), axis=(0,1))/( 70**2 * 20)
p_m = 1.0*np.sum(1.0*(g_m>0), axis=(0,1))/( 70**2 * 22)
print "Mean p_f: " + str(np.mean(p_f))
print "Mean p_m: " + str(np.mean(p_m))
# compute Wilcoxon signed-rank test on feature
pval = stats.wilcoxon(p_m[:20], p_f)[1]
print "P-value: " + str(pval)
"""
Explanation: Step 6: Apply the above to data
End of explanation
"""
|
dariox2/CADL | session-3/lecture-3.ipynb | apache-2.0 | # imports
%matplotlib inline
# %pylab osx
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
# Some additional libraries which we'll use just
# to produce some visualizations of our training
from libs.utils import montage
from libs import gif
import IPython.display as ipyd
plt.style.use('ggplot')
# Bit of formatting because I don't like the default inline code style:
from IPython.core.display import HTML
HTML("""<style> .rendered_html code {
padding: 2px 4px;
color: #c7254e;
background-color: #f9f2f4;
border-radius: 4px;
} </style>""")
"""
Explanation: Session 3: Unsupervised and Supervised Learning
<p class="lead">
Parag K. Mital<br />
<a href="https://www.kadenze.com/courses/creative-applications-of-deep-learning-with-tensorflow/info">Creative Applications of Deep Learning w/ Tensorflow</a><br />
<a href="https://www.kadenze.com/partners/kadenze-academy">Kadenze Academy</a><br />
<a href="https://twitter.com/hashtag/CADL">#CADL</a>
</p>
<a name="learning-goals"></a>
Learning Goals
Build an autoencoder w/ linear and convolutional layers
Understand how one hot encodings work
Build a classification network w/ linear and convolutional layers
<!-- MarkdownTOC autolink=true autoanchor=true bracket=round -->
Introduction
Unsupervised vs. Supervised Learning
Autoencoders
MNIST
Fully Connected Model
Convolutional Autoencoder
Denoising Autoencoder
Variational Autoencoders
Predicting Image Labels
One-Hot Encoding
Using Regression for Classification
Fully Connected Network
Convolutional Networks
Saving/Loading Models
Checkpoint
Protobuf
Wrap Up
Reading
<!-- /MarkdownTOC -->
<a name="introduction"></a>
Introduction
In the last session we created our first neural network.
We saw that in order to create a neural network, we needed to define a cost function which would allow gradient descent to optimize all the parameters in our network <TODO: Insert animation of gradient descent from previous session>. We also saw how neural networks become much more expressive by introducing series of linearities followed by non-linearities, or activation functions. <TODO: Insert graphic of activation functions from previous session>.
We then explored a fun application of neural networks using regression to learn to paint color values given x, y positions. This allowed us to build up a sort of painterly like version of an image.
In this session, we'll see how to use some simple deep nets with about 3 or 4 layers capable of performing unsupervised and supervised learning, and I'll explain those terms in a bit. The components we learn here will let us explore data in some very interesting ways.
<a name="unsupervised-vs-supervised-learning"></a>
Unsupervised vs. Supervised Learning
Machine learning research in deep networks performs one of two types of learning. You either have a lot of data and you want the computer to reason about it, maybe to encode the data using less data, and just explore what patterns there might be. That's useful for clustering data, reducing the dimensionality of the data, or even for generating new data. That's generally known as unsupervised learning. In the supervised case, you actually know what you want out of your data. You have something like a label or a class that is paired with every single piece of data. In the first half of this session, we'll see how unsupervised learning works using something called an autoencoder and how it can be extended using convolution. Then we'll get into supervised learning and show how we can build networks for performing regression and classification. By the end of this session, hopefully all of that will make a little more sense. Don't worry if it doesn't yet! Really the best way to learn is to put this stuff into practice in the homeworks.
<a name="autoencoders"></a>
Autoencoders
<TODO: Graphic of autoencoder network diagram>
An autoencoder is a type of neural network that learns to encode its inputs, often using much less data. It does so in a way that it can still output the original input with just the encoded values. For it to learn, it does not require "labels" as its output. Instead, it tries to output whatever it was given as input. So in goes an image, and out should also go the same image. But it has to be able to retain all the details of the image, even after possibly reducing the information down to just a few numbers.
We'll also explore how this method can be extended and used to cluster or organize a dataset, or to explore latent dimensions of a dataset that explain some interesting ideas. For instance, we'll see how with handwritten numbers, we will be able to see how each number can be encoded in the autoencoder without ever telling it which number is which.
<TODO: place teaser of MNIST video learning>
But before we get there, we're going to need to develop an understanding of a few more concepts.
First, imagine a network that takes as input an image. The network can be composed of either matrix multiplications or convolutions to any number of filters or dimensions. At the end of any processing, the network has to be able to recompose the original image it was input.
In the last session, we saw how to build a network capable of taking 2 inputs representing the row and column of an image, and predicting 3 outputs, the red, green, and blue colors. Instead of having 2 inputs, we'll now have an entire image as an input, the brightness of every pixel in our image. And as output, we're going to have the same thing, the entire image being output.
<a name="mnist"></a>
MNIST
Let's first get some standard imports:
End of explanation
"""
from libs.datasets import MNIST
ds = MNIST()
"""
Explanation: Then we're going to try this with the MNIST dataset, which I've included a simple interface for in the libs module.
End of explanation
"""
# ds.<tab>
"""
Explanation: Let's take a look at what this returns:
End of explanation
"""
print(ds.X.shape)
"""
Explanation: So we can see that there are a few interesting accessors. ... we're not going to worry about the labels until a bit later when we talk about a different type of model which can go from the input image to predicting which label the image is. But for now, we're going to focus on trying to encode the image and be able to reconstruct the image from our encoding. let's take a look at the images which are stored in the variable X. Remember, in this course, we'll always use the variable X to denote the input to a network. and we'll use the variable Y to denote its output.
End of explanation
"""
plt.imshow(ds.X[0].reshape((28, 28)))
# Let's get the first 1000 images of the dataset and reshape them
imgs = ds.X[:1000].reshape((-1, 28, 28))
# Then create a montage and draw the montage
plt.imshow(montage(imgs), cmap='gray')
"""
Explanation: So each image has 784 features, and there are 70k of them. If we want to draw the image, we're going to have to reshape it to a square. 28 x 28 is 784. So we're just going to reshape it to a square so that we can see all the pixels arranged in rows and columns instead of one giant vector.
End of explanation
"""
# Take the mean across all images
mean_img = np.mean(ds.X, axis=0)
# Then plot the mean image.
plt.figure()
plt.imshow(mean_img.reshape((28, 28)), cmap='gray')
"""
Explanation: Let's take a look at the mean of the dataset:
End of explanation
"""
# Take the std across all images
std_img = np.std(ds.X, axis=0)
# Then plot the std image.
plt.figure()
plt.imshow(std_img.reshape((28, 28)))
"""
Explanation: And the standard deviation
End of explanation
"""
dimensions = [512, 256, 128, 64]
"""
Explanation: So recall from session 1 that these two images are really saying whats more or less contant across every image, and what's changing. We're going to try and use an autoencoder to try to encode everything that could possibly change in the image.
<a name="fully-connected-model"></a>
Fully Connected Model
To try and encode our dataset, we are going to build a series of fully connected layers that get progressively smaller. So in neural net speak, every pixel is going to become its own input neuron. And from the original 784 neurons, we're going to slowly reduce that information down to smaller and smaller numbers. It's often standard practice to use other powers of 2 or 10. I'll create a list of the number of dimensions we'll use for each new layer.
End of explanation
"""
# So the number of features is the second dimension of our inputs matrix, 784
n_features = ds.X.shape[1]
# And we'll create a placeholder in the tensorflow graph that will be able to get any number of n_feature inputs.
X = tf.placeholder(tf.float32, [None, n_features])
"""
Explanation: So we're going to reduce our 784 dimensions down to 512 by multiplyling them by a 784 x 512 dimensional matrix. Then we'll do the same thing again using a 512 x 256 dimensional matrix, to reduce our dimensions down to 256 dimensions, and then again to 128 dimensions, then finally to 64. To get back to the size of the image, we're going to just going to do the reverse. But we're going to use the exact same matrices. We do that by taking the transpose of the matrix, which reshapes the matrix so that the rows become columns, and vice-versa. So our last matrix which was 128 rows x 64 columns, when transposed, becomes 64 rows x 128 columns.
So by sharing the weights in the network, we're only really learning half of the network, and those 4 matrices are going to make up the bulk of our model. We just have to find out what they are using gradient descent.
We're first going to create placeholders for our tensorflow graph. We're going to set the first dimension to None. This is something special for placeholders which tells tensorflow "let this dimension be any possible value". 1, 5, 100, 1000, it doesn't matter. We're going to pass our entire dataset in minibatches. So we'll send 100 images at a time. But we'd also like to be able to send in only 1 image and see what the prediction of the network is. That's why we let this dimension be flexible in the graph.
End of explanation
"""
# let's first copy our X placeholder to the name current_input
current_input = X
n_input = n_features
# We're going to keep every matrix we create so let's create a list to hold them all
Ws = []
# We'll create a for loop to create each layer:
for layer_i, n_output in enumerate(dimensions):
# just like in the last session,
# we'll use a variable scope to help encapsulate our variables
# This will simply prefix all the variables made in this scope
# with the name we give it.
with tf.variable_scope("encoder/layer/{}".format(layer_i)):
# Create a weight matrix which will increasingly reduce
# down the amount of information in the input by performing
# a matrix multiplication
W = tf.get_variable(
name='W',
shape=[n_input, n_output],
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02))
# Now we'll multiply our input by our newly created W matrix
        # (note: no bias term is added in this simple model)
h = tf.matmul(current_input, W)
# And then use a relu activation function on its output
current_input = tf.nn.relu(h)
# Finally we'll store the weight matrix so we can build the decoder.
Ws.append(W)
# We'll also replace n_input with the current n_output, so that on the
# next iteration, our new number inputs will be correct.
n_input = n_output
"""
Explanation: Now we're going to create a network which will perform a series of multiplications on X, followed by adding a bias, and then wrapping all of this in a non-linearity:
End of explanation
"""
print(current_input.get_shape())
"""
Explanation: So now we've created a series of multiplications in our graph which take us from our input of batch size times number of features which started as None x 784, and then we're multiplying it by a series of matrices which will change the size down to None x 64.
End of explanation
"""
# We'll first reverse the order of our weight matrices
Ws = Ws[::-1]
# then reverse the order of our dimensions
# appending the last layers number of inputs.
dimensions = dimensions[::-1][1:] + [ds.X.shape[1]]
print(dimensions)
for layer_i, n_output in enumerate(dimensions):
# we'll use a variable scope again to help encapsulate our variables
# This will simply prefix all the variables made in this scope
# with the name we give it.
with tf.variable_scope("decoder/layer/{}".format(layer_i)):
# Now we'll grab the weight matrix we created before and transpose it
# So a 3072 x 784 matrix would become 784 x 3072
# or a 256 x 64 matrix, would become 64 x 256
W = tf.transpose(Ws[layer_i])
# Now we'll multiply our input by our transposed W matrix
h = tf.matmul(current_input, W)
# And then use a relu activation function on its output
current_input = tf.nn.relu(h)
# We'll also replace n_input with the current n_output, so that on the
# next iteration, our new number inputs will be correct.
n_input = n_output
"""
Explanation: In order to get back to the original dimensions of the image, we're going to reverse everything we just did. Let's see how we do that:
End of explanation
"""
Y = current_input
"""
Explanation: After this, our current_input will become the output of the network:
End of explanation
"""
# We'll first measure the average difference across every pixel
cost = tf.reduce_mean(tf.squared_difference(X, Y), 1)
print(cost.get_shape())
"""
Explanation: Now that we have the output of the network, we just need to define a training signal to train the network with. To do that, we create a cost function which will measure how well the network is doing:
End of explanation
"""
cost = tf.reduce_mean(cost)
"""
Explanation: And then take the mean again across batches:
End of explanation
"""
learning_rate = 0.001
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
"""
Explanation: We can now train our network just like we did in the last session. We'll need to create an optimizer which takes a parameter learning_rate. And we tell it that we want to minimize our cost, which is measuring the difference between the output of the network and the input.
End of explanation
"""
# %%
# We create a session to use the graph
sess = tf.Session()
sess.run(tf.initialize_all_variables())
"""
Explanation: Now we'll create a session to manage the training in minibatches:
End of explanation
"""
# Some parameters for training
batch_size = 100
n_epochs = 5
# We'll try to reconstruct the same first 100 images and show how
# The network does over the course of training.
examples = ds.X[:100]
# We'll store the reconstructions in a list
imgs = []
fig, ax = plt.subplots(1, 1)
for epoch_i in range(n_epochs):
    for batch_X, _ in ds.train.next_batch(batch_size):
sess.run(optimizer, feed_dict={X: batch_X - mean_img})
recon = sess.run(Y, feed_dict={X: examples - mean_img})
recon = np.clip((recon + mean_img).reshape((-1, 28, 28)), 0, 255)
img_i = montage(recon).astype(np.uint8)
imgs.append(img_i)
ax.imshow(img_i, cmap='gray')
fig.canvas.draw()
print(epoch_i, sess.run(cost, feed_dict={X: batch_X - mean_img}))
gif.build_gif(imgs, saveto='ae.gif', cmap='gray')
ipyd.Image(url='ae.gif?{}'.format(np.random.rand()),
height=500, width=500)
"""
Explanation: Now we'll train:
End of explanation
"""
from tensorflow.python.framework.ops import reset_default_graph
reset_default_graph()
# And we'll create a placeholder in the tensorflow graph that will be able to get any number of n_feature inputs.
X = tf.placeholder(tf.float32, [None, n_features])
"""
Explanation: <a name="convolutional-autoencoder"></a>
Convolutional Autoencoder
To get even better encodings, we can also try building a convolutional network. Why would a convolutional network perform any different to a fully connected one? Let's see what we were doing in the fully connected network. For every pixel in our input, we have a set of weights corresponding to every output neuron. Those weights are unique to each pixel. Each pixel gets its own row in the weight matrix. That really doesn't make a lot of sense, since we would guess that nearby pixels are probably not going to be so different. And we're not really encoding what's happening around that pixel, just what that one pixel is doing.
In a convolutional model, we're explicitly modeling what happens around a pixel. And we're using the exact same convolutions no matter where in the image we are. But we're going to use a lot of different convolutions.
Recall in session 1 we created a Gaussian and Gabor kernel and used this to convolve an image to either blur it or to accentuate edges. Armed with what you know now, you could try to train a network to learn the parameters that map an untouched image to a blurred or edge filtered version of it. What you should find is the kernel will look sort of like what we built by hand. I'll leave that as an exercise for you.
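A rough sketch of that exercise might look something like this (the blurred targets X_blur are assumed to be precomputed, e.g. with the Gaussian kernel from session 1):
```python
X_clean = tf.placeholder(tf.float32, [None, 28, 28, 1])
X_blur = tf.placeholder(tf.float32, [None, 28, 28, 1])
kernel = tf.get_variable('learned_kernel', shape=[5, 5, 1, 1],
                         initializer=tf.random_normal_initializer(stddev=0.02))
Y_pred = tf.nn.conv2d(X_clean, kernel, strides=[1, 1, 1, 1], padding='SAME')
loss = tf.reduce_mean(tf.squared_difference(Y_pred, X_blur))
train_op = tf.train.AdamOptimizer(0.01).minimize(loss)
# after training, the learned kernel should look roughly like the Gaussian built by hand
```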
But in fact, that's too easy really. That's just 1 filter you would have to learn. We're going to see how we can use many convolutional filters, way more than 1, and how it will help us to encode the MNIST dataset.
To begin we'll need to reset the current graph and start over.
End of explanation
"""
X_tensor = tf.reshape(X, [-1, 28, 28, 1])
"""
Explanation: Since X is currently [batch, height*width], we need to reshape it to a
4-D tensor to use it in a convolutional graph. Remember back to the first session that in order to perform convolution, we have to use 4-dimensional tensors describing the:
N x H x W x C
We'll reshape our input placeholder by telling the shape parameter to be these new dimensions. However, since our batch dimension is None, we cannot reshape without using the special value -1, which says that the size of that dimension should be computed so that the total size remains constant. Since we haven't defined the batch dimension's shape yet, we use -1 to denote this
dimension should not change size.
End of explanation
"""
n_filters = [16, 16, 16]
filter_sizes = [4, 4, 4]
"""
Explanation: We'll now setup the first convolutional layer. Remember from Session 2 that the weight matrix for convolution should be
[height x width x input_channels x output_channels]
Think a moment about how this is different to the fully connected network. In the fully connected network, every pixel was being multiplied by its own weight to every other neuron. With a convolutional network, we use the extra dimensions to allow the same set of filters to be applied everywhere across an image. This is also known in the literature as weight sharing, since we're sharing the weights no matter where in the input we are. That's unlike the fully connected approach, which has unique weights for every pixel. What's more is after we've performed the convolution, we've retained the spatial organization of the input. We still have dimensions of height and width. That's again unlike the fully connected network which effectively shuffles or takes into account information from everywhere, not at all caring about where anything is. That can be useful or not depending on what we're trying to achieve. Often, it is something we might want to do after a series of convolutions to encode translation invariance. Don't worry about that for now. With MNIST especially we won't need to do that since all of the numbers are in the same position.
Now with our tensor ready, we're going to do what we've just done with the fully connected autoencoder. Except, instead of performing matrix multiplications, we're going to create convolution operations. To do that, we'll need to decide on a few parameters including the filter size, how many convolution filters we want, and how many layers we want. I'll start with a fairly small network, and let you scale this up in your own time.
End of explanation
"""
current_input = X_tensor
# notice instead of having 784 as our input features, we're going to have
# just 1, corresponding to the number of channels in the image.
# We're going to use convolution to find 16 filters, or 16 channels of information in each spatial location we perform convolution at.
n_input = 1
# We're going to keep every matrix we create so let's create a list to hold them all
Ws = []
shapes = []
# We'll create a for loop to create each layer:
for layer_i, n_output in enumerate(n_filters):
# just like in the last session,
# we'll use a variable scope to help encapsulate our variables
# This will simply prefix all the variables made in this scope
# with the name we give it.
with tf.variable_scope("encoder/layer/{}".format(layer_i)):
# we'll keep track of the shapes of each layer
# As we'll need these for the decoder
shapes.append(current_input.get_shape().as_list())
        # Create a weight tensor of shape
        # [filter_height, filter_width, input_channels, output_channels]
        # which will be convolved over the input
W = tf.get_variable(
name='W',
shape=[
filter_sizes[layer_i],
filter_sizes[layer_i],
n_input,
n_output],
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02))
# Now we'll convolve our input by our newly created W matrix
h = tf.nn.conv2d(current_input, W,
strides=[1, 2, 2, 1], padding='SAME')
# And then use a relu activation function on its output
current_input = tf.nn.relu(h)
# Finally we'll store the weight matrix so we can build the decoder.
Ws.append(W)
# We'll also replace n_input with the current n_output, so that on the
# next iteration, our new number inputs will be correct.
n_input = n_output
"""
Explanation: Now we'll create a loop to create every layer's convolution, storing the convolution operations we create so that we can do the reverse.
End of explanation
"""
# We'll first reverse the order of our weight matrices
Ws.reverse()
# and the shapes of each layer
shapes.reverse()
# and the number of filters (which is the same but could have been different)
n_filters.reverse()
# and append the last filter size which is our input image's number of channels
n_filters = n_filters[1:] + [1]
print(n_filters, filter_sizes, shapes)
# and then loop through our convolution filters and get back our input image
# we'll enumerate the shapes list to get us there
for layer_i, shape in enumerate(shapes):
# we'll use a variable scope to help encapsulate our variables
# This will simply prefix all the variables made in this scope
# with the name we give it.
with tf.variable_scope("decoder/layer/{}".format(layer_i)):
        # Grab the weight tensor we created in the encoder so that the
        # decoder shares (ties) its weights with the encoder
W = Ws[layer_i]
# Now we'll convolve by the transpose of our previous convolution tensor
h = tf.nn.conv2d_transpose(current_input, W,
tf.pack([tf.shape(X)[0], shape[1], shape[2], shape[3]]),
strides=[1, 2, 2, 1], padding='SAME')
# And then use a relu activation function on its output
current_input = tf.nn.relu(h)
"""
Explanation: Now with our convolutional encoder built and the encoding weights stored, we'll reverse the whole process to decode everything back out to the original image.
End of explanation
"""
Y = current_input
Y = tf.reshape(Y, [-1, n_features])
"""
Explanation: Now we have the reconstruction through the network:
End of explanation
"""
cost = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(X, Y), 1))
learning_rate = 0.001
# pass learning rate and cost to optimize
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Session to manage vars/train
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# Some parameters for training
batch_size = 100
n_epochs = 5
# We'll try to reconstruct the same first 100 images and show how
# The network does over the course of training.
examples = ds.X[:100]
# We'll store the reconstructions in a list
imgs = []
fig, ax = plt.subplots(1, 1)
for epoch_i in range(n_epochs):
    for batch_X, _ in ds.train.next_batch(batch_size):
sess.run(optimizer, feed_dict={X: batch_X - mean_img})
recon = sess.run(Y, feed_dict={X: examples - mean_img})
recon = np.clip((recon + mean_img).reshape((-1, 28, 28)), 0, 255)
img_i = montage(recon).astype(np.uint8)
imgs.append(img_i)
ax.imshow(img_i, cmap='gray')
fig.canvas.draw()
print(epoch_i, sess.run(cost, feed_dict={X: batch_X - mean_img}))
gif.build_gif(imgs, saveto='conv-ae.gif', cmap='gray')
ipyd.Image(url='conv-ae.gif?{}'.format(np.random.rand()),
height=500, width=500)
"""
Explanation: We can measure the cost and train exactly like before with the fully connected network:
End of explanation
"""
from libs import datasets
# ds = datasets.MNIST(one_hot=True)
"""
Explanation: <a name="denoising-autoencoder"></a>
Denoising Autoencoder
The denoising autoencoder is a very simple extension to an autoencoder. Instead of seeing the input directly, the network is fed a corrupted version of it, for instance masked by noise, but the reconstruction loss is still measured on the original uncorrupted image. What this does is let the model try to interpret occluded or missing parts of the thing it is reasoning about. It would make sense for many models that not every datapoint in an input is necessary to understand what is going on. Denoising autoencoders try to enforce that, and as a result, the encodings at the middle most layer are often far more representative of the actual classes of different objects.
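For intuition, the corruption step can be as simple as multiplying the input by a random binary mask (a minimal sketch, assuming X is the network's input placeholder):
```python
corrupt_prob = 0.5
mask = tf.cast(tf.random_uniform(tf.shape(X)) > corrupt_prob, tf.float32)
X_corrupted = X * mask
# the encoder is fed X_corrupted, but the cost still compares the output to the clean X
```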
In the resources section, you'll see that I've included a general framework autoencoder allowing you to use either a fully connected or convolutional autoencoder, and whether or not to include denoising. If you interested in the mechanics of how this works, I encourage you to have a look at the code.
<a name="variational-autoencoders"></a>
Variational Autoencoders
A variational autoencoder extends the traditional autoencoder by using an additional layer called the variational layer. It is actually two networks that are cleverly connected using a simple reparameterization trick, to help the gradient flow through both networks during backpropagation allowing both to be optimized.
We don't have enough time to get into the details, but I'll try to quickly explain: it tries to optimize the likelihood that a particular distribution would create an image, rather than trying to optimize simply the L2 loss at the end of the network. Or put another way, it hopes that there is some well-known distribution that the image encodings can be described by. This is a bit tricky to grasp, so don't worry if you don't understand the details. The major difference to hone in on is that instead of optimizing distance in the input space, i.e. pixel to pixel distance, which is actually quite arbitrary if you think about it... why would we care about the exact pixels being the same? Human vision would not care for most cases: if there was a slight translation of our image, then the distance could be very high, but we would never be able to tell the difference. So intuitively, measuring error based on raw pixel to pixel distance is not such a great approach.
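The reparameterization trick itself is tiny: the encoder outputs a mean and a (log) standard deviation, and the latent sample is written so that gradients can flow through both (a sketch, where z_mu and z_log_sigma are assumed encoder outputs):
```python
epsilon = tf.random_normal(tf.shape(z_mu))
z = z_mu + tf.exp(z_log_sigma) * epsilon
```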
Instead of relying on raw pixel differences, the variational autoencoder tries to optimize two networks. One which says that given my pixels, I am pretty sure I can encode them to the parameters of some well known distribution, like a set of Gaussians, instead of some arbitrary density of values. And then I can optimize the latent space, by saying that particular distribution should be able to represent my entire dataset, and I try to optimize the likelihood that it will create the images I feed through the network. So distance is somehow encoded in this latent space. Of course I appreciate that is a difficult concept so forgive me for not being able to expand on it in more detail.
But to make up for the lack of time and explanation, I've included this model under the resources section for you to play with! Just like the "vanilla" autoencoder, this one supports both fully connected, convolutional, and denoising models.
This model performs so much better than the vanilla autoencoder. In fact, it performs so well that I can even manage to encode the majority of MNIST into 2 values. The following visualization demonstrates the learning of a variational autoencoder over time.
<mnist visualization>
There are of course a lot more interesting applications of such a model. You could for instance, try encoding a more interesting dataset, such as CIFAR which you'll find a wrapper for in the libs/datasets module.
<TODO: produce GIF visualization madness>
Or the celeb faces dataset:
<celeb dataset>
Or you could try encoding an entire movie. We tried it with the copyleft movie, "Sita Sings The Blues". Every 2 seconds, we stored an image of this movie, and then fed all of these images to a deep variational autoencoder. This is the result.
<show sita sings the blues training images>
And I'm sure we can get closer with deeper nets and more train time. But notice how in both celeb faces and sita sings the blues, the decoding is really blurred. That is because of the assumption of the underlying representational space. We're saying the latent space must be modeled as a gaussian, and those factors must be distributed as a gaussian. This enforces a sort of discretization of my representation, enforced by the noise parameter of the gaussian. In the last session, we'll see how we can avoid this sort of blurred representation and get even better decodings using a generative adversarial network.
For now, consider the applications that this method opens up. Once you have an encoding of a movie, or image dataset, you are able to do some very interesting things. You have effectively stored all the representations of that movie, although it's not perfect of course. But, you could for instance, see how another movie would be interpreted by the same network. That's similar to what Terrance Broad did for his project on reconstructing blade runner and a scanner darkly, though he made use of both the variational autoencoder and the generative adversarial network. We're going to look at that network in more detail in the last session.
We'll also look at how to properly handle very large datasets like celeb faces or the one used here to create the sita sings the blues autoencoder. Taking every 60th frame of Sita Sings The Blues gives you about 300k images. And that's a lot of data to try and load in all at once. We had to size it down considerably, and make use of what's called a tensorflow input pipeline. I've included all the code for training this network, which took about 1 day on a fairly powerful machine, but I will not get into the details of the image pipeline bits until session 5 when we look at generative adversarial networks. I'm delaying this because we'll need to learn a few things along the way before we can build such a network.
<a name="predicting-image-labels"></a>
Predicting Image Labels
We've just seen a variety of types of autoencoders and how they are capable of compressing information down to its inner most layer while still being able to retain most of the interesting details. Considering that the CelebNet dataset was nearly 200 thousand images of 64 x 64 x 3 pixels, and we're able to express those with just an inner layer of 50 values, that's just magic basically. Magic.
Okay, let's move on now to a different type of learning often called supervised learning. Unlike what we just did, which is work with a set of data and not have any idea what that data should be labeled as, we're going to explicitly tell the network what we want it to be labeled by saying what the network should output for a given input. In the previous case, we just had a set of Xs, our images. Now, we're going to have Xs and Ys given to us, and use the Xs to try and output the Ys.
With MNIST, the outputs of each image are simply what numbers are drawn in the input image. The wrapper for grabbing this dataset from the libs module takes an additional parameter which I didn't talk about called one_hot.
End of explanation
"""
ds = datasets.MNIST(one_hot=False)
# let's look at the first label
print(ds.Y[0])
# okay and what does the input look like
plt.imshow(np.reshape(ds.X[0], (28, 28)), cmap='gray')
# great it is just the label of the image
plt.figure()
# Let's look at the next one just to be sure
print(ds.Y[1])
# Yea the same idea
plt.imshow(np.reshape(ds.X[1], (28, 28)), cmap='gray')
"""
Explanation: To see what this is doing, let's compare setting it to false versus true:
End of explanation
"""
ds = datasets.MNIST(one_hot=True)
plt.figure()
plt.imshow(np.reshape(ds.X[0], (28, 28)), cmap='gray')
print(ds.Y[0])
# array([ 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.])
# Woah a bunch more numbers. 10 to be exact, which is also the number
# of different labels in the dataset.
plt.imshow(np.reshape(ds.X[1], (28, 28)), cmap='gray')
print(ds.Y[1])
# array([ 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.])
"""
Explanation: And now let's look at what the one hot version looks like:
End of explanation
"""
print(ds.X.shape)
"""
Explanation: So instead of having a number from 0-9, we have 10 numbers corresponding to the digits, 0-9, and each value is either 0 or 1. Whichever digit the image represents is the one that is 1.
To summarize, we have all of the images of the dataset stored as:
n_observations x n_features tensor (n-dim array)
End of explanation
"""
print(ds.Y.shape)
print(ds.Y[0])
"""
Explanation: And labels stored as n_observations x n_labels where each observation is a one-hot vector, where only one element is 1 indicating which class or label it is.
End of explanation
"""
# cost = tf.reduce_sum(tf.abs(y_pred - y_true))
"""
Explanation: <a name="one-hot-encoding"></a>
One-Hot Encoding
Remember in the last session, we saw how to build a network capable of taking 2 inputs representing the row and column of an image, and predicting 3 outputs, the red, green, and blue colors. Just like in our unsupervised model, instead of having 2 inputs, we'll now have 784 inputs, the brightness of every pixel in our image. And instead of 3 outputs, like in our painting network from last session, or the 784 outputs we had in our unsupervised MNIST network, we'll now have 10 outputs representing the one-hot encoding of its label.
So why don't we just have 1 output? A number from 0-9? Wouldn't having 10 different outputs instead of just 1 be harder to learn? Consider how we normally train the network. We have to give it a cost which it will use to minimize. What could our cost be if our output was just a single number, 0-9? We would still have the true label, and the predicted label. Could we just take the subtraction of the two values? e.g. the network predicted 0, but the image was really the number 8. Okay so then our distance could be:
End of explanation
"""
import tensorflow as tf
from libs import datasets
ds = datasets.MNIST(split=[0.8, 0.1, 0.1])
n_input = 28 * 28
"""
Explanation: But in this example, the cost would be 8. If the image was a 4, and the network predicted a 0 again, the cost would be 4... but isn't the network still just as wrong, not half as much as when the image was an 8? In a one-hot encoding, the cost would be the same for both, meaning they are both just as wrong. So we're able to better measure the cost by separating each class's label into its own dimension.
<a name="using-regression-for-classification"></a>
Using Regression for Classification
The network we build will be trained to output values between 0 and 1. They won't output exactly a 0 or 1. But rather, they are able to produce any value. 0, 0.1, 0.2, ... and that means the networks we've been using are actually performing regression. In regression, the output is "continuous", rather than "discrete". The difference is this: a discrete output means the network can only output one of a few things. Like, 0, 1, 2, or 3, and that's it. But a continuous output means it can output any real number.
In order to perform what's called classification, we're just simply going to look at whichever value is the highest in our one hot encoding. In order to do that a little better, we're actually going to interpret our one hot encodings as probabilities by scaling the total output by their sum. What this does is allow us to understand that as we grow more confident in one prediction, we should grow less confident in all other predictions. We only have so much certainty to go around, enough to add up to 1. If we think the image might also be the number 1, then we lose some certainty of it being the number 0.
It turns out there is a better cost function than simply measuring the distance between two vectors when they are probabilities. It's called cross entropy:
\begin{align}
\Large{H(x) = -\sum{y_{\text{t}}(x) * \log(y_{\text{p}}(x))}}
\end{align}
What this equation does is measure the similarity of our prediction with our true distribution, by exponentially increasing the error whenever our prediction gets closer to 1 when it should be 0, and similarly by exponentially increasing the error whenever our prediction gets closer to 0 when it should be 1. I won't go into more detail here, but just know that we'll be using this measure instead of a normal distance measure.
<a name="fully-connected-network"></a>
Fully Connected Network
Defining the Network
Let's see how our one hot encoding and our new cost function will come into play. We'll create our network for predicting image classes in pretty much the same way we've created previous networks:
We will have as input to the network 28 x 28 values.
End of explanation
"""
n_output = 10
"""
Explanation: As output, we have our 10 one-hot-encoding values
End of explanation
"""
X = tf.placeholder(tf.float32, [None, n_input])
"""
Explanation: We're going to create placeholders for our tensorflow graph. We're going to set the first dimension to None. Remember from our unsupervised model, this is just something special for placeholders which tells tensorflow "let this dimension be any possible value". 1, 5, 100, 1000, it doesn't matter. Since we're going to pass our entire dataset in batches we'll need this to be say 100 images at a time. But we'd also like to be able to send in only 1 image and see what the prediction of the network is. That's why we let this dimension be flexible.
End of explanation
"""
Y = tf.placeholder(tf.float32, [None, n_output])
"""
Explanation: For the output, we'll have None again, since we'll have one output row for every input image we feed in.
End of explanation
"""
# We'll use the linear layer we created in the last session, which I've stored in the libs file:
# NOTE: The lecture used an older version of this function which had a slightly different definition.
from libs import utils
Y_pred, W = utils.linear(
x=X,
n_output=n_output,
activation=tf.nn.softmax,
name='layer1')
"""
Explanation: Now we'll connect our input to the output with a linear layer. Instead of relu, we're going to use softmax. This will perform our exponential scaling of the outputs and make sure the output sums to 1, making it a probability.
End of explanation
"""
# We add 1e-12 because the log is undefined at 0.
cross_entropy = -tf.reduce_sum(Y * tf.log(Y_pred + 1e-12))
optimizer = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
"""
Explanation: And then we write our loss function as the cross entropy. And then we'll give our optimizer the cross_entropy measure just like we would with GradientDescent. The formula for cross entropy is:
\begin{align}
\Large{H(x) = -\sum{\text{Y}_{\text{true}} * \log(\text{Y}_{\text{pred}})}}
\end{align}
End of explanation
"""
predicted_y = tf.argmax(Y_pred, 1)
actual_y = tf.argmax(Y, 1)
"""
Explanation: To determine the correct class from our regression output, we have to take the maximum index.
End of explanation
"""
correct_prediction = tf.equal(predicted_y, actual_y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
"""
Explanation: We can then measure the accuracy by checking whether these are equal. Note, this is just for us to see, and is not at all used to "train" the network!
End of explanation
"""
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# Now actually do some training:
batch_size = 50
n_epochs = 5
for epoch_i in range(n_epochs):
for batch_xs, batch_ys in ds.train.next_batch():
sess.run(optimizer, feed_dict={
X: batch_xs,
Y: batch_ys
})
valid = ds.valid
print(sess.run(accuracy,
feed_dict={
X: valid.images,
Y: valid.labels
}))
# Print final test accuracy:
test = ds.test
print(sess.run(accuracy,
feed_dict={
X: test.images,
Y: test.labels
}))
"""
Explanation: Training the Network
The rest of the code will be exactly the same as before. We chunk the training dataset into batch_size chunks, and let these images help train the network over a number of iterations.
End of explanation
"""
# We first get the graph that we used to compute the network
g = tf.get_default_graph()
# And can inspect everything inside of it
[op.name for op in g.get_operations()]
"""
Explanation: What we should see is the accuracy being printed after each "epoch", or after every run over the entire dataset. Since we're using batches, we use the notion of an "epoch" to denote whenever we've gone through the entire dataset.
<a name="inspecting-the-network"></a>
Inspecting the Trained Network
Let's try and now inspect how the network is accomplishing this task. We know that our network is a single matrix multiplication of our 784 pixel values. The weight matrix, W, should therefore have 784 rows. As outputs, it has 10 values. So the matrix is composed in the linear function as n_input x n_output values. So the matrix is 784 rows x 10 columns.
<TODO: graphic w/ wacom showing network and matrix multiplication and pulling out single neuron/column>
In order to get this matrix, we could have had our linear function return the tf.Tensor. But since everything is part of the tensorflow graph, and we've started using nice names for all of our operations, we can actually find this tensor using tensorflow:
End of explanation
"""
W = g.get_tensor_by_name('layer1/W:0')
"""
Explanation: Looking at the names of the operations, we see there is one called layer1/W. But this is the tf.Operation. Not the tf.Tensor. The tensor is the result of the operation. To get the result of the operation, we simply add ":0" to the name of the operation:
End of explanation
"""
W_arr = np.array(W.eval(session=sess))
print(W_arr.shape)
"""
Explanation: We can use the existing session to compute the current value of this tensor:
End of explanation
"""
fig, ax = plt.subplots(1, 10, figsize=(20, 3))
for col_i in range(10):
ax[col_i].imshow(W_arr[:, col_i].reshape((28, 28)), cmap='coolwarm')
"""
Explanation: And now we have our tensor! Let's try visualizing every neuron, or every column of this matrix:
End of explanation
"""
from tensorflow.python.framework.ops import reset_default_graph
reset_default_graph()
"""
Explanation: We're going to use the coolwarm color map, which will use "cool" values, or blue-ish colors for low values. And "warm" colors, red, basically, for high values. So what we begin to see is that there is a weighting of all the input values, where pixels that are likely to describe that number are being weighted high, and pixels that are not likely to describe that number are being weighted low. By summing all of these multiplications together, the network is able to begin to predict what number is in the image. This is not a very good network though, and the representations it learns could still do a much better job. We were only right about 93% of the time according to our accuracy. State of the art models will get about 99.9% accuracy.
<a name="convolutional-networks"></a>
Convolutional Networks
To get better performance, we can build a convolutional network. We've already seen how to create a convolutional network with our unsupervised model. We're going to make the same modifications here to help us predict the digit labels in MNIST.
Defining the Network
I'll first reset the current graph, so we can build a new one. We'll use tensorflow's nice helper function for doing this.
End of explanation
"""
# We first get the graph that we used to compute the network
g = tf.get_default_graph()
# And can inspect everything inside of it
[op.name for op in g.get_operations()]
"""
Explanation: And just to confirm, let's see what's in our graph:
End of explanation
"""
# We'll have placeholders just like before which we'll fill in later.
ds = datasets.MNIST(one_hot=True, split=[0.8, 0.1, 0.1])
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
"""
Explanation: Great. Empty.
Now let's get our dataset, and create some placeholders like before:
End of explanation
"""
X_tensor = tf.reshape(X, [-1, 28, 28, 1])
"""
Explanation: Since X is currently [batch, height*width], we need to reshape to a
4-D tensor to use it in a convolutional graph. Remember, in order to perform convolution, we have to use 4-dimensional tensors describing the:
N x H x W x C
We'll reshape our input placeholder by telling the shape parameter to be these new dimensions and we'll use -1 to denote this dimension should not change size.
End of explanation
"""
filter_size = 5
n_filters_in = 1
n_filters_out = 32
W_1 = tf.get_variable(
name='W',
shape=[filter_size, filter_size, n_filters_in, n_filters_out],
initializer=tf.random_normal_initializer())
"""
Explanation: We'll now setup the first convolutional layer. Remember that the weight matrix for convolution should be
[height x width x input_channels x output_channels]
Let's create 32 filters. That means every location in the image, depending on the stride I set when we perform the convolution, will be filtered by this many different kernels. In session 1, we convolved our image with just 2 different types of kernels. Now, we're going to let the computer try to find out what 32 filters help it map the input to our desired output via our training signal.
End of explanation
"""
b_1 = tf.get_variable(
name='b',
shape=[n_filters_out],
initializer=tf.constant_initializer())
"""
Explanation: Bias is always [output_channels] in size.
End of explanation
"""
h_1 = tf.nn.relu(
tf.nn.bias_add(
tf.nn.conv2d(input=X_tensor,
filter=W_1,
strides=[1, 2, 2, 1],
padding='SAME'),
b_1))
"""
Explanation: Now we can build a graph which does the first layer of convolution:
We define our stride as batch x height x width x channels. This has the effect of resampling the image down to half of the size.
End of explanation
"""
n_filters_in = 32
n_filters_out = 64
W_2 = tf.get_variable(
name='W2',
shape=[filter_size, filter_size, n_filters_in, n_filters_out],
initializer=tf.random_normal_initializer())
b_2 = tf.get_variable(
name='b2',
shape=[n_filters_out],
initializer=tf.constant_initializer())
h_2 = tf.nn.relu(
tf.nn.bias_add(
tf.nn.conv2d(input=h_1,
filter=W_2,
strides=[1, 2, 2, 1],
padding='SAME'),
b_2))
"""
Explanation: And just like the first layer, add additional layers to create a deep net.
End of explanation
"""
# We'll now reshape so we can connect to a fully-connected/linear layer:
h_2_flat = tf.reshape(h_2, [-1, 7 * 7 * n_filters_out])
"""
Explanation: 4d -> 2d
End of explanation
"""
# NOTE: This uses a slightly different version of the linear function than the lecture!
h_3, W = utils.linear(h_2_flat, 128, activation=tf.nn.relu, name='fc_1')
"""
Explanation: Create a fully-connected layer:
End of explanation
"""
# NOTE: This uses a slightly different version of the linear function than the lecture!
Y_pred, W = utils.linear(h_3, n_output, activation=tf.nn.softmax, name='fc_2')
"""
Explanation: And one last fully-connected layer which will give us the correct number of outputs, and use a softmax to exponentially scale the outputs and convert them to a probability:
End of explanation
"""
cross_entropy = -tf.reduce_sum(Y * tf.log(Y_pred + 1e-12))
optimizer = tf.train.AdamOptimizer().minimize(cross_entropy)
"""
Explanation: <TODO: Draw as graphical representation>
Training the Network
The rest of the training process is the same as the previous network. We'll define loss/eval/training functions:
End of explanation
"""
correct_prediction = tf.equal(tf.argmax(Y_pred, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
"""
Explanation: Monitor accuracy:
End of explanation
"""
sess = tf.Session()
sess.run(tf.initialize_all_variables())
"""
Explanation: And create a new session to actually perform the initialization of all the variables:
End of explanation
"""
batch_size = 50
n_epochs = 10
for epoch_i in range(n_epochs):
for batch_xs, batch_ys in ds.train.next_batch():
sess.run(optimizer, feed_dict={
X: batch_xs,
Y: batch_ys
})
valid = ds.valid
print(sess.run(accuracy,
feed_dict={
X: valid.images,
Y: valid.labels
}))
# Print final test accuracy:
test = ds.test
print(sess.run(accuracy,
feed_dict={
X: test.images,
Y: test.labels
}))
"""
Explanation: Then we'll train in minibatches and report accuracy:
End of explanation
"""
from libs.utils import montage_filters
W1 = sess.run(W_1)
plt.figure(figsize=(10, 10))
plt.imshow(montage_filters(W1), cmap='coolwarm', interpolation='nearest')
"""
Explanation: <TODO: Fun timelapse of waiting>
Inspecting the Trained Network
Let's take a look at the kernels we've learned using the following montage function, similar to the one we've been using for creating image montages, except this one is suited for the dimensions of convolution kernels instead of 4-d images. So it has the height and width first, unlike images which have batch then height then width. We'll use this function to visualize every convolution kernel in the first and second layers of our network.
End of explanation
"""
W2 = sess.run(W_2)
plt.imshow(montage_filters(W2 / np.max(W2)), cmap='coolwarm')
"""
Explanation: What we're looking at are all of the convolution kernels that have been learned. Compared to the previous network we've learned, it is much harder to understand what's happening here. But let's try and explain these a little more. The kernels that have been automatically learned here are responding to edges of different scales, orientations, and rotations. It's likely these are really describing parts of digits, or the strokes that make up digits. Put another way, they are trying to get at the "information" in the image by seeing what changes.
That's a pretty fundamental idea. That information would be things that change. Of course, there are filters for things that aren't changing as well. Some filters may even seem to respond to things that are mostly constant. However, if our network has learned a lot of filters that look like that, it's likely that the network hasn't really learned anything at all. The flip side of this is if the filters all look more or less random. That's also a bad sign.
Let's try looking at the second layer's kernels:
End of explanation
"""
import os
sess = tf.Session()
init_op = tf.initialize_all_variables()
saver = tf.train.Saver()
sess.run(init_op)
if os.path.exists("model.ckpt"):
saver.restore(sess, "model.ckpt")
print("Model restored.")
"""
Explanation: It's really difficult to know what's happening here. There are many more kernels in this layer. They've already passed through a set of filters and an additional non-linearity. How can we really know what the network is doing to learn its objective function? The important thing for now is to see that most of these filters are different, and that they are not all constant or uniformly activated. That means it's really doing something, but we aren't really sure yet how to see how that affects the way we think of and perceive the image. In the next session, we'll learn more about how we can start to interrogate these deeper representations and try to understand what they are encoding. Along the way, we'll learn some pretty amazing tricks for producing entirely new aesthetics that eventually led to the "deep dream" viral craze.
<a name="savingloading-models"></a>
Saving/Loading Models
Tensorflow provides a few ways of saving/loading models. The easiest way is to use a checkpoint. Though, this really useful while you are training your network. When you are ready to deploy or hand out your network to others, you don't want to pass checkpoints around as they contain a lot of unnecessary information, and it also requires you to still write code to create your network. Instead, you can create a protobuf which contains the definition of your graph and the model's weights. Let's see how to do both:
<a name="checkpoint"></a>
Checkpoint
Creating a checkpoint requires you to have already created a set of operations in your tensorflow graph. Once you've done this, you'll create a session like normal and initialize all of the variables. After this, you create a tf.train.Saver which can restore a previously saved checkpoint, overwriting all of the variables with your saved parameters.
End of explanation
"""
save_path = saver.save(sess, "./model.ckpt")
print("Model saved in file: %s" % save_path)
"""
Explanation: Creating the checkpoint is easy. After a few iterations of training (depending on your application, say every 1/10 of the total training time), you'll want to write the saved model. You can do this like so:
End of explanation
"""
path='./'
ckpt_name = 'model.ckpt'
fname = 'model.tfmodel'
dst_nodes = ['Y']
g_1 = tf.Graph()
with tf.Session(graph=g_1) as sess:
x = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))
# Replace this with some code which will create your tensorflow graph:
net = create_network()
    sess.run(tf.initialize_all_variables())
    # We need a Saver over this graph's variables before we can restore them.
    saver = tf.train.Saver()
    saver.restore(sess, ckpt_name)
graph_def = tf.python.graph_util.convert_variables_to_constants(
sess, sess.graph_def, dst_nodes)
g_2 = tf.Graph()
with tf.Session(graph=g_2) as sess:
tf.train.write_graph(
tf.python.graph_util.extract_sub_graph(
graph_def, dst_nodes), path, fname, as_text=False)
"""
Explanation: <a name="protobuf"></a>
Protobuf
The second way of saving a model is really useful for when you don't want to pass around the code for producing the tensors or computational graph itself. It is also useful for moving the code to deployment or for use in the C++ version of Tensorflow. To do this, you'll want to run an operation to convert all of your trained parameters into constants. Then, you'll create a second graph which copies the necessary tensors, extracts the subgraph, and writes this to a model. The summarized code below shows you how you could use a checkpoint to restore your models parameters, and then export the saved model as a protobuf.
End of explanation
"""
with open("model.tfmodel", mode='rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='model')
"""
Explanation: When you wanted to import this model, now you wouldn't need to refer to the checkpoint or create the network by specifying its placeholders or operations. Instead, you'd use the import_graph_def operation like so:
End of explanation
"""
|
LSSTC-DSFP/LSSTC-DSFP-Sessions | Sessions/Session14/Day2/DeeplearningSolutions.ipynb | mit | !pip install astronn
import torch
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay
"""
Explanation: Classification with a Multi-layer Perceptron (MLP)
Author: V. Ashley Villar
In this problem set, we will not be implementing neural networks from scratch. Yesterday, you built a perceptron in Python. Multi-layer perceptrons (MLPs) are, as discussed in the lecture, several layers of these perceptrons stacked. Here, we will learn how to use one of the most common modules for building neural networks: PyTorch.
End of explanation
"""
from astroNN.datasets import load_galaxy10
images, labels_original = load_galaxy10()
from astroNN.datasets.galaxy10 import galaxy10cls_lookup
%matplotlib inline
# Plot an example image from each class
# First, find an example of each class
uclasses, counts = np.unique(labels_original,return_counts=True)
print(len(labels_original))
for i, uclass in enumerate(uclasses):
print(uclass,counts[i])
first_example = np.where(labels_original==uclass)[0][0]
plt.imshow(images[first_example])
plt.title(galaxy10cls_lookup(uclass))
plt.show()
"""
Explanation: Problem 1: Understanding the Data
For this problem set, we will use the Galaxy10 dataset made available via the astroNN module. This dataset is made up of 17736 images of galaxies which have been labelled by hand. See this link for more information.
First we will visualize our data.
Problem 1a Show one example of each class as an image.
End of explanation
"""
plt.hist(labels_original)
plt.xlabel('Class Label')
plt.show()
#Only work with 1 and 2
gind = np.where((labels_original==1) | (labels_original==2))
images_top_two = images[gind]
labels_top_two = labels_original[gind]
"""
Explanation: Problem 2b Make a histogram showing the fraction of each class
Keep only the top two classes (i.e., the classes with the most galaxies)
End of explanation
"""
import torch.nn.functional as F
torch.set_default_dtype(torch.float)
labels_top_two_one_hot = F.one_hot(torch.tensor(labels_top_two - np.min(labels_top_two)).long(), num_classes=2)
images_top_two = torch.tensor(images_top_two).float()
labels_top_two_one_hot = labels_top_two_one_hot.float()
# we're going to flatten the images for our MLP
images_top_two_flat = images_top_two.reshape(len(images_top_two),-1)
#Normalize the flux of the images here
images_top_two_flat = (images_top_two_flat - torch.mean(images_top_two_flat))/torch.std(images_top_two_flat)
"""
Explanation: This next block of code converts the data to a format which is more compatible with our neural network.
End of explanation
"""
from sklearn.model_selection import train_test_split
images_train, images_test, labels_train, labels_test = train_test_split(
images_top_two_flat, labels_top_two_one_hot, test_size=0.33, random_state=42)
np.shape(images_train)
"""
Explanation: Problem 2c Split the data into a training and test set (66/33 split) using the train_test_split function from sklearn
End of explanation
"""
class MLP(torch.nn.Module):
    # this defines the model
    def __init__(self, input_size, hidden_size):
        super(MLP, self).__init__()
        print(input_size, hidden_size)
        self.input_size = input_size
        self.hidden_size = hidden_size
        # first hidden layer
        self.hiddenlayer = torch.nn.Linear(self.input_size, self.hidden_size)
        # second hidden layer of the same size (added for Problem 3a)
        self.hiddenlayer2 = torch.nn.Linear(self.hidden_size, self.hidden_size)
        self.outputlayer = torch.nn.Linear(self.hidden_size, 2)
        self.sigmoid = torch.nn.Sigmoid()
        self.softmax = torch.nn.Softmax()

    def forward(self, x):
        layer1 = self.hiddenlayer(x)
        activation = self.sigmoid(layer1)
        layer2 = self.hiddenlayer2(activation)
        activation2 = self.sigmoid(layer2)
        layer3 = self.outputlayer(activation2)
        output = self.softmax(layer3)
        return output
"""
Explanation: The next cell will outline how one can make an MLP with PyTorch.
Problem 3a Talk to a partner about how this code works, line by line. Add another hidden layer which is the same size as the first hidden layer.
End of explanation
"""
# train the model
def train_model(training_data,training_labels, test_data,test_labels, model):
# define the optimization
criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.007,momentum=0.9)
for epoch in range(100):
# clear the gradient
optimizer.zero_grad()
# compute the model output
myoutput = model(training_data)
# calculate loss
loss = criterion(myoutput, training_labels)
# credit assignment
loss.backward()
# update model weights
optimizer.step()
# STUDENTS ADD THIS PART
output_test = model(test_data)
loss_test = criterion(output_test, test_labels)
plt.plot(epoch,loss.detach().numpy(),'ko')
plt.plot(epoch,loss_test.detach().numpy(),'ro')
print(epoch,loss.detach().numpy())
plt.show()
"""
Explanation: The next block of code will show how one can train the model for 100 epochs. Note that we use the binary cross-entropy as our objective function and stochastic gradient descent as our optimization method.
Problem 3b Edit the code so that the function plots the training and test loss for each epoch.
End of explanation
"""
model = MLP(np.shape(images_train[0])[0],50)
train_model(images_train, labels_train, images_test, labels_test, model)
"""
Explanation: The next block trains the model, assuming a hidden layer size of 50 neurons.
Problem 3c Change the learning rate lr to minimize the cross entropy score
End of explanation
"""
# evaluate the model
def evaluate_model(data,labels, model):
yhat = model(data)
yhat = yhat.detach().numpy()
best_class = np.argmax(yhat,axis=1)
acc = accuracy_score(best_class,np.argmax(labels,axis=1))
return(acc)
# evaluate the model
acc = evaluate_model(images_test,labels_test, model)
print('Accuracy: %.3f' % acc)
"""
Explanation: Write a function called evaluate_model which takes the image data, labels, and model as input and returns the accuracy as output. You can use the accuracy_score function.
End of explanation
"""
yhat = model(images_test)
yhat = yhat.detach().numpy()
best_class = np.argmax(yhat,axis=1)
truth = np.argmax(labels_test,axis=1)
cm = confusion_matrix(truth,best_class)
disp = ConfusionMatrixDisplay(confusion_matrix=cm)
disp.plot()
plt.show()
"""
Explanation: Problem 3d Make a confusion matrix for the test set
End of explanation
"""
class MLP_new(torch.nn.Module):
    # this defines the model
    def __init__(self, input_size, hidden_size):
        super(MLP_new, self).__init__()
        print(input_size, hidden_size)
        self.input_size = input_size
        self.hidden_size = hidden_size
        # first hidden layer
        self.hiddenlayer = torch.nn.Linear(self.input_size, self.hidden_size)
        # second hidden layer of the same size, as in MLP above
        self.hiddenlayer2 = torch.nn.Linear(self.hidden_size, self.hidden_size)
        # three output classes now
        self.outputlayer = torch.nn.Linear(self.hidden_size, 3)
        self.sigmoid = torch.nn.Sigmoid()
        self.softmax = torch.nn.Softmax()

    def forward(self, x):
        layer1 = self.hiddenlayer(x)
        activation = self.sigmoid(layer1)
        layer2 = self.hiddenlayer2(activation)
        activation2 = self.sigmoid(layer2)
        layer3 = self.outputlayer(activation2)
        output = self.softmax(layer3)
        return output
#Only work with 0,1,2
gind = np.where((labels_original==0) | (labels_original==1) | (labels_original==2))
images_top_three = images[gind]
labels_top_three = labels_original[gind]
x,counts = np.unique(labels_top_three,return_counts=True)
print(counts)
torch.set_default_dtype(torch.float)
labels_top_three_one_hot = F.one_hot(torch.tensor(labels_top_three - np.min(labels_top_three)).long(), num_classes=3)
images_top_three = torch.tensor(images_top_three).float()
labels_top_three_one_hot = labels_top_three_one_hot.float()
# we're going to flatten the images for our MLP
images_top_three_flat = images_top_three.reshape(len(images_top_three),-1)
#Normalize the flux of the images here
images_top_three_flat = (images_top_three_flat - torch.mean(images_top_three_flat))/torch.std(images_top_three_flat)
images_train, images_test, labels_train, labels_test = train_test_split(
images_top_three_flat, labels_top_three_one_hot, test_size=0.33, random_state=42)
# train the model
def train_model(training_data,training_labels, test_data,test_labels, model):
# define the optimization
criterion = torch.nn.CrossEntropyLoss(weight=torch.Tensor(np.sum(counts)/counts))
optimizer = torch.optim.SGD(model.parameters(), lr=0.005,momentum=0.9)
for epoch in range(100):
# clear the gradient
optimizer.zero_grad()
# compute the model output
myoutput = model(training_data)
# calculate loss
loss = criterion(myoutput, training_labels)
# credit assignment
loss.backward()
# update model weights
optimizer.step()
# STUDENTS ADD THIS PART
output_test = model(test_data)
loss_test = criterion(output_test, test_labels)
plt.plot(epoch,loss.detach().numpy(),'ko')
plt.plot(epoch,loss_test.detach().numpy(),'ro')
print(epoch,loss.detach().numpy())
plt.show()
model = MLP_new(np.shape(images_train[0])[0],50)
train_model(images_train, labels_train, images_test, labels_test, model)
# evaluate the model
def evaluate_model(data,labels, model):
yhat = model(data)
yhat = yhat.detach().numpy()
best_class = np.argmax(yhat,axis=1)
acc = accuracy_score(best_class,np.argmax(labels,axis=1))
return(acc)
# evaluate the model
acc = evaluate_model(images_test,labels_test, model)
print('Accuracy: %.3f' % acc)
yhat = model(images_test)
yhat = yhat.detach().numpy()
best_class = np.argmax(yhat,axis=1)
truth = np.argmax(labels_test,axis=1)
cm = confusion_matrix(truth,best_class)
disp = ConfusionMatrixDisplay(confusion_matrix=cm)
disp.plot()
plt.show()
"""
Explanation: Challenge Problem Add a third class to your classifier and begin accounting for uneven classes. There are several steps to this:
Edit the neural network to output 3 classes
Change the criterion to a custom criterion function, such that the entropy of each class is weighted by the inverse fraction of each class size (e.g., if the galaxy class breakdowns are 1:2:3, the weights would be 6:3:2).
End of explanation
"""
|
CPernet/LanguageDecision | notebooks/exploratory/2017-05-17-hddm-exploration.ipynb | gpl-3.0 | %matplotlib inline
"""
Explanation: Exploring hddm
End of explanation
"""
import hddm
# Load csv data - converted to numpy array
data = hddm.load_csv('../examples/hddm_simple.csv')
# Create hddm model object
model = hddm.HDDM(data, depends_on={'v': 'difficulty'})
# Markov chain Monte Carlo sampling
model.sample(2000, burn=20)
"""
Explanation: Quick-Start Tutorial
As found in the hddm repo README file - see https://github.com/hddm-devs/hddm
End of explanation
"""
# Model fitted parameters & summary stats
model.print_stats()
model.print_stats().__class__
"""
Explanation: Notes on MCMC sampling:
End of explanation
"""
# Fit posterior RT distributions
model.plot_posteriors()
# Plot theoretical RT distributions
model.plot_posterior_predictive()
"""
Explanation: print_stats() is literally just a printer - it doesn't return a data structure. Could we parse this info into a nice Python data structure?
End of explanation
"""
|
probcomp/bdbcontrib | examples/satellites/querying-and-plotting.ipynb | apache-2.0 | import os
import subprocess
if not os.path.exists('satellites.bdb'):
subprocess.check_call(['curl', '-O', 'http://probcomp.csail.mit.edu/bayesdb/downloads/satellites.bdb'])
"""
Explanation: Querying a Population and Plotting the Results
Before we can query a population, we must have one. We will use a population of satellites as an example.
In this portion of the tutorial we will query and plot the undigested population data, and not their implications. All of the queries here are analogous to similar or identical queries in Structured Query Language (SQL).
For simplicity we still use the same pre-analyzed population database (.bdb file) as in other portions of the tutorial, even though it is not important that any probabilistic analysis has been done. If we have not yet downloaded that pre-analyzed database, do so now:
End of explanation
"""
from bdbcontrib import Population
satellites = Population(name='satellites', bdb_path='satellites.bdb')
"""
Explanation: We construct a Population instance that helps us read, query, and visualize a particular population.
End of explanation
"""
satellites.q("""
SELECT * FROM satellites
WHERE Name LIKE 'International Space Station%'
""").transpose()
satellites.q("""SELECT COUNT(*) FROM satellites;""")
"""
Explanation: Querying the data using SQL
Before querying the implications of a population, it can be useful to look at a sample of the raw data and metadata. This can be done using a combination of ordinary SQL and convenience functions built into bayeslite. We start by finding one of the most well-known satellites, the International Space Station:
End of explanation
"""
satellites.q("""SELECT * FROM satellites WHERE Name LIKE '%GPS%'""").transpose()
"""
Explanation: We can select multiple items using a SQL wildcard, in this case the match-anything '%' on either side of "GPS".
We ask for variables as rows and observations as columns by using .transpose() as we did for the ISS above. By default, observations map to rows, and variables map to columns.
End of explanation
"""
satellites.q("""
SELECT name, dry_mass_kg, period_minutes, class_of_orbit FROM satellites
ORDER BY period_minutes ASC LIMIT 10;
""")
"""
Explanation: Select just a few variables in the data, ordering by the number of minutes it takes for the satellite to complete one orbit, measured in minutes, and sorted ascending (as opposed to DESC), again as in SQL:
End of explanation
"""
%matplotlib inline
"""
Explanation: Note that NaN is ordered before 0 in this sort.
Plots and Graphs
Bayeslite includes statistical graphics procedures designed for easy use with data extracted from an SQL database.
Before we introduce those, let the notebook know that we would like to use and display matplotlib figures within the notebook:
End of explanation
"""
satellites.help("plot")
"""
Explanation: Let's see a menu of the easily available plotting utilities:
End of explanation
"""
help(satellites.pairplot)
"""
Explanation: We will get more detailed help on each plotting utility as we introduce it.
Pairplots — Exploring two variables at a time
The methods pairplot and pairplot_vars are intended to plot all pairs within a group of variables. The plots are arranged as a lower-triangular matrix of plots.
Along the diagonal, there are histograms with the values of the given variable along the x axis, and the counts of occurrences of those values (or bucketed ranges of those values) on the y axis.
The rest of the lower triangle plots the row variable on the y axis against the column variable on the x axis.
Different kinds of plots are used for categorical vs. numeric values.
The fuller documentation:
End of explanation
"""
satellites.pairplot_vars(['purpose', 'power_watts', 'launch_mass_kg'],
colorby='class_of_orbit', show_contour=False);
"""
Explanation: Pairplots: pairplot_vars
pairplot_vars is a shortcut to help you just name the variables you want to see, rather than writing the BQL to select those variables. As we will see, you may often start with pairplot_vars, and decide to refine your query in BQL to focus on particular areas of interest:
End of explanation
"""
satellites.pairplot("""SELECT purpose, power_watts, launch_mass_kg, class_of_orbit
FROM satellites
WHERE purpose LIKE '%Meteorology%';""",
colorby='class_of_orbit', show_contour=False);
"""
Explanation: Pairplots: with SQL WHERE
The purposes are hard to read, and we may not be interested in all of them. Say we're interested only in meteorology satellites of one variety or another. It's easy to restrict to just those if you use pairplot instead of pairplot_vars, and use a bit of extra BQL:
End of explanation
"""
satellites.pairplot("""SELECT purpose, power_watts, launch_mass_kg, class_of_orbit
FROM satellites
WHERE purpose LIKE '%Meteorology%';""",
colorby='class_of_orbit', show_contour=True);
"""
Explanation: We might learn that meteorology satellites in geosynchronous orbit use about as much or more power than meteorology satellites in low-earth orbit (see power_watts row of plots), but that they use a little less power at a given mass (see scatter of launch mass vs. power_watts), and that there are no meteorology satellites in medium earth orbit or in elliptical orbits (class_of_orbit color legend box).
An expert might be able to help us interpret these observations, e.g. why certain orbits are preferred for meteorology, what the driving considerations are for power consumption and launch mass, etc., but pairplots are a powerful tool for visually finding questions to ask.
Pairplots: show_contour
Why did we choose not to show contours? Let's try:
End of explanation
"""
satellites.pairplot("""SELECT power_watts, launch_mass_kg
FROM satellites""",
show_contour=True);
"""
Explanation: So when we show them, because of the way the underlying plotting utility works, we see suggestions of negative wattages and masses!
The contours in the power vs. mass plot also obscure the small number of data points, lending a false sense of meaning.
When there are enough data points, it can be useful to plot kernel density estimators (contours) on each plot, to see tendencies overlaid above the data points, so long as one keeps the above shortcomings in mind:
End of explanation
"""
satellites.pairplot_vars(['purpose', 'class_of_orbit']);
"""
Explanation: Pairplots: two categoricals
Where two variables are both categorical, we show a 2d histogram (a heatmap).
Also, we can turn off the one-variable histograms along the diagonal:
End of explanation
"""
satellites.pairplot("""SELECT purpose, class_of_orbit FROM %t
GROUP BY purpose
HAVING COUNT(purpose) >= 5;""");
"""
Explanation: Pairplots: with SQL HAVING
We can use the usual SQL constructs to restrict our plot. For example, in this plot of purpose vs. class of orbit, we restrict to those purposes that have at least five satellites:
End of explanation
"""
satellites.q('''SELECT apogee_km FROM %t WHERE period_minutes is NULL;''')
"""
Explanation: Pairplots: with show_missing and NULL values.
End of explanation
"""
satellites.pairplot_vars(['period_minutes', 'apogee_km'], show_missing=True);
"""
Explanation: When we pairplot these, normally that data point would simply be missing, but with show_missing, there is a line indicating that period_minutes could be anything at an apogee around 35k.
End of explanation
"""
satellites.pairplot("""
SELECT period_minutes / 60.0 as period_hours,
apogee_km / 1000.0 as apogee_x1000km FROM %t""",
show_missing=True, show_contour=False);
"""
Explanation: Pairplots: with SQL arithmetic
The values are large enough to be hard to read, but of course we can resolve that in the query:
End of explanation
"""
help(satellites.barplot)
satellites.barplot("""
SELECT class_of_orbit, count(*) AS class_count FROM satellites
GROUP BY class_of_orbit
ORDER BY class_count DESC
""");
"""
Explanation: Other Plot Types
Barplot
End of explanation
"""
satellites.barplot("""
SELECT class_of_orbit || "--" || type_of_orbit as class_type,
count(*) AS class_type_count
FROM satellites
GROUP BY class_type
ORDER BY class_type_count DESC
""");
"""
Explanation: Let's add the type of orbit too:
End of explanation
"""
satellites.barplot("""
SELECT class_of_orbit || "--" || type_of_orbit as class_type,
sum(power_watts)/count(*) AS average_power
FROM satellites
GROUP BY class_type
ORDER BY average_power DESC
""");
"""
Explanation: One can even do a bit of computation here, in this case computing and plotting the average power_watts, rather than merely the count:
End of explanation
"""
help(satellites.histogram)
satellites.histogram("""SELECT dry_mass_kg FROM %t""", nbins=35);
"""
Explanation: Histogram
End of explanation
"""
satellites.histogram("""
SELECT dry_mass_kg, class_of_orbit FROM satellites
WHERE dry_mass_kg < 5000
""", nbins=15, normed=True);
"""
Explanation: We can break down that silhouette according to a categorical column that comes second.
We can also show percentages rather than absolute counts using normed.
End of explanation
"""
help(satellites.heatmap)
satellites.heatmap("""
SELECT users, country_of_operator, COUNT(country_of_operator) as country_count FROM %t
GROUP BY country_of_operator
HAVING COUNT(country_of_operator) >= 5;
""")
"""
Explanation: Heatmap (a.k.a. 2d histogram)
End of explanation
"""
satellites.heatmap("""
SELECT users, country_of_operator, COUNT(country_of_operator) as country_count FROM %t
GROUP BY country_of_operator
HAVING COUNT(country_of_operator) >= 5;""",
figsize=(12, 10))
"""
Explanation: Figsize
But that's a bit too small to read. For most of these plot functions, you can specify a figure size as a tuple (width-in-inches, height-in-inches):
End of explanation
"""
|
louridas/rwa | content/notebooks/chapter_10.ipynb | bsd-2-clause | import csv
import pprint
with open('ballots.csv') as ballots_file:
reader = csv.reader(ballots_file)
ballots = list(reader)
pprint.pprint(ballots, width=30)
"""
Explanation: Voting Strengths
Chapter 10 of Real World Algorithms.
Panos Louridas<br />
Athens University of Economics and Business
The Schulze Method
To start with the Schulze method, we need a way to input ballots.
We assume that the ballots are saved in a file, one ballot per line. In each line, that is, in each ballot, the candidates are listed in decreasing order of preference.
We'll use the file ballots.csv as an example. The file is in Comma-separated Values (CSV) format.
So, the first line is:
D,B,A,C
which means that the first preference of the voter is candidate D, then B, then A, then C.
Although seemingly simple, CSV is a treacherous format.
There are many more details than one would think at first sight; for example, what happens if a field in the line contains a comma, can we have different delimiters, and so on.
For that reason, you should always use a ready-made library for handling CSV files.
Our ballots file is simple, but there is no reason not to use Python's CSV library anyway.
We'll get all the ballots and we'll put them into a list.
End of explanation
"""
from collections import defaultdict
candidates = {
'A': 0,
'B': 1,
'C': 2,
'D': 3
}
def calc_pairwise_prefs(ballots, candidates):
# Initialize p to 0.
p = [ [0 for j in candidates.keys() ] for i in candidates.keys() ]
# Take each ballot in turn.
for ballot in ballots:
# Take each candidate in the ballot.
for i, c_i in enumerate(ballot):
# Take all following candidates in the ballot.
for c_j in ballot[i+1:]:
# Add to the score of c_i vs c_j.
p[candidates[c_i]][candidates[c_j]] += 1
return p
p = calc_pairwise_prefs(ballots, candidates)
pprint.pprint(p, width=20)
"""
Explanation: The first step in the Schulze method is to calculate the pairwise preferences of the voters regarding the candidates.
That is an array $P$, such that element $P[c_j, c_k]$ shows how many voters prefer candidate $c_j$ to candidate $c_k$.
As our candidates are given by characters, we'll assign a number, starting from zero, to each of the candidates, so that we'll be able to use integer-based indices.
End of explanation
"""
def create_election_graph(p):
n = len(p)
g = [ [-1 for j in range(n) ] for i in range(n) ]
for i in range(n):
for j in range(n):
if p[i][j] > p[j][i]:
g[i][j] = p[i][j] - p[j][i]
return g
"""
Explanation: The second step in the Schulze method is to create an election graph.
This will be represented by an adjacency matrix.
If for two candidates $c_i$ and $c_j$ the number $P[c_i, c_j]$ of voters that prefer $c_i$ over $c_j$ is greater than the number of voters $P[c_j, c_i]$ that prefer $c_j$ over $c_i$, we add the link $c_i \rightarrow c_j$ and we assign the number $P[c_i, c_j] - P[c_j, c_i]$ as the weight of the link $c_i \rightarrow c_j$.
We'll assign the value $-1$ for all other pairs (or $-\infty$, but as $-1$ is not a valid weight, it will also do).
End of explanation
"""
g = create_election_graph(p)
pprint.pprint(g, width=20)
"""
Explanation: We can then see the adjacency matrix for our election example:
End of explanation
"""
def calc_strongest_paths(p):
n = len(p)
# Initialize strongest paths array.
s = [ [ -1 for j in range(n) ] for i in range(n) ]
# Initialize predecessors array.
pred = [ [ -1 for j in range(n) ] for i in range(n) ]
# Initially the strength of the path s[i][j] is simply
# the difference in the weights between p[i][j]
# and p[j][i].
for i in range(n):
for j in range(n):
if p[i][j] > p[j][i]:
s[i][j] = p[i][j] - p[j][i]
pred[i][j] = i
# For each k, i, j, such that the path from i to j
# can be strengthened by taking the detour from i to k
# and k to j adjust the path and the predecessor.
# This can happen at most n times.
for k in range(n):
for i in range(n):
if i != k:
for j in range(n):
if j != i:
if s[i][j] < min(s[i][k], s[k][j]):
s[i][j] = min(s[i][k], s[k][j])
pred[i][j] = pred[k][j]
return (s, pred)
"""
Explanation: With the adjacency matrix available, we can implement the calculation of the strongest paths.
The function calc_strongest_paths(p) will take as input the adjacency matrix and will return:
* s, a matrix of size $n \times n$ such that s[i][j] is the strength of the strongest path from node i to node j.
* pred, a matrix of size $n \times n$ such that pred[i][j] is the predecessor of node j in the strongest path from node i to node j.
The algorithm finds the strongest paths iteratively, by allowing to use one additional node as intermediate node in the paths in each iteration.
End of explanation
"""
s, pred = calc_strongest_paths(p)
print('strongest paths')
pprint.pprint(s, width=30)
print('predecessors')
pprint.pprint(pred, width=30)
"""
Explanation: We now apply calc_strongest_paths(p) to our example:
End of explanation
"""
def calc_results(s):
n = len(s)
wins = [ [] for i in range(n) ]
for i in range(n):
for j in range(n):
if i != j:
if s[i][j] > s[j][i]:
wins[i].append(j)
return wins
"""
Explanation: The final step in the Schulze algorithm is finding, for each candidate the candidates that are less popular.
That is a matter of comparing s[i][j] and s[j][i].
We implement the logic in calc_results(s).
End of explanation
"""
wins = calc_results(s)
print(wins)
"""
Explanation: Finally, we can find the winner of the election:
End of explanation
"""
from collections import defaultdict
def calc_pairwise_prefs(ballots):
p = defaultdict(int)
for ballot in ballots:
for i, c_i in enumerate(ballot):
for c_j in ballot[i+1:]:
p[(c_i, c_j)] += 1
return p
p = calc_pairwise_prefs(ballots)
pprint.pprint(p)
"""
Explanation: Candidate A wins over C.
Candidate B wins over A, C.
Candidate D wins over A, B, C.
Candidate D wins the election.
The Schulze Method: An Alternative
We can implement the Schulze method with an alternative implementation, in which instead of an adjacency matrix we use a dictionary to represent the preferences.
The logic is entirely the same.
We implement calc_pairwise_prefs(ballots) to return a dictionary p such that p[(c_i, c_j)] shows how many voters prefer candidate c_i to candidate c_j.
The keys to the dictionary are the tuples (c_i, c_j).
Note that we do not need to work with indices instead of the actual candidates.
We use a defaultdict(int), so the dictionary will return 0 if (c_i, c_j) is not a key.
Essentially this is like initializing the preferences matrix to zero.
End of explanation
"""
p = calc_pairwise_prefs(ballots)
import itertools
candidates = ['A', 'B', 'C', 'D']
def print_matrix(candidates, matrix, col_width=5):
print(' ', end="")
num_candidates = len(candidates)
for candidate in candidates:
print(f'{candidate:^{col_width}}', end="")
i = 0
for c1, c2 in itertools.product(candidates, repeat=2):
if i % num_candidates == 0:
print()
print(f'{candidates[i // num_candidates]:<2}', end="")
print(f'{matrix[(c1, c2)]:^{col_width}}', end="")
i += 1
print()
print_matrix(candidates, p, 5)
"""
Explanation: The printout of the preferences dictionary is less elegant than the printout of the preferences matrix that we had before.
We can fix that by writing a short helper function that will output our dictionaries in matrix format.
End of explanation
"""
def create_election_graph(p):
g = defaultdict(lambda:-1)
for (c_i, c_j), pref in p.items():
if pref > p[(c_j, c_i)]:
g[(c_i, c_j)] = pref - p[(c_j, c_i)]
return g
"""
Explanation: We then create the election graph.
We use again a dictionary to store the graph. The keys of the dictionary are node tuples and the values are differences in preferences.
Note that not all tuples are actually stored in the dictionary. We store explicitly only the tuples with a positive difference in preferences.
We use a defaultdict(lambda:-1), which will return -1 for any other (non-existing) key, so for all other pairs.
End of explanation
"""
g = create_election_graph(p)
print_matrix(candidates, g, 5)
"""
Explanation: In this way we save space.
We can still use print_matrix(candidates, g, 5) to print the dictionary in matrix format.
Only those entries that are not equal to -1 are actually stored in the dictionary.
End of explanation
"""
def calc_strongest_paths(p, candidates):
# Initialize strongest paths dict.
s = defaultdict(lambda:-1)
# Initialize predecessors dict.
pred = defaultdict(lambda:-1)
# Initially the strength of the path from c_i to c_j is simply
# the difference in the weights between s[(c_i, c_j)]
# and s[(c_j, c_i)].
for (c_i, c_j), pref in p.items():
if pref > p[(c_j, c_i)]:
s[(c_i, c_j)] = pref - p[(c_j, c_i)]
pred[(c_i, c_j)] = c_i
# For each c_k, c_i, c_j, such that the path from c_i to c_j
# can be strengthened by taking the detour from c_i to c_k
# and then to c_k and c_j adjust the path and the predecessor.
# This can happen at most as many times as there are candidates.
for c_k in candidates:
for c_i in candidates:
if c_i != c_k:
for c_j in candidates:
if c_j != c_i:
if s[(c_i, c_j)] < min(s[(c_i, c_k)], s[(c_k, c_j)]):
s[(c_i, c_j)] = min(s[(c_i, c_k)], s[(c_k, c_j)])
pred[(c_i, c_j)] = pred[(c_k, c_j)]
return (s, pred)
"""
Explanation: We'll use again defaultdicts to implement calc_strongest_paths(p, candidates).
We need to pass candidates to the function as we no longer use numerical indices, but the actual candidates.
End of explanation
"""
s, pred = calc_strongest_paths(p, candidates)
print('strongest paths')
print_matrix(candidates, s, 5)
print('predecessors')
print_matrix(candidates, pred, 5)
"""
Explanation: We now apply calc_strongest_paths(p, candidates) to our example:
End of explanation
"""
def calc_results(s):
wins = defaultdict(list)
for (c_i, c_j), v in s.items():
if s[(c_i, c_j)] > s[(c_j, c_i)]:
wins[c_i].append(c_j)
return wins
"""
Explanation: Finally, we calculate the results.
We do as before, but we return a dictionary instead.
The keys are the candidates.
The value of a key is a list containing the candidates that lose to the particular candidate indicated by the key.
End of explanation
"""
wins = calc_results(s)
pprint.pprint(wins)
"""
Explanation: So, here are the results again:
End of explanation
"""
import sys
MAX_INT = sys.maxsize
def floyd_warshall(w):
n = len(w)
# Initialize distances matrix.
dist = [ [ MAX_INT for j in range(n) ] for i in range(n) ]
# Initialize predecessors matrix.
pred = [ [ -1 for j in range(n) ] for i in range(n) ]
# Initially the length of the path from i to j is simply
# the weight between w[i][j], if it exists, and then
# i is the predecessor of j.
for i in range(n):
for j in range(n):
if w[i][j] != 0:
dist[i][j] = w[i][j]
pred[i][j] = i
# For each k, i, j, such that the path from i to j
# can be shortened by taking the detour from i to k
# and k to j adjust the path and the predecessor.
# This can happen at most n times.
for k in range(n):
for i in range(n):
if i != k:
for j in range(n):
if j != i:
if (dist[i][k] != MAX_INT and
dist[k][j] != MAX_INT and
dist[i][j] > dist[i][k] + dist[k][j]):
dist[i][j] = dist[i][k] + dist[k][j]
pred[i][j] = pred[k][j]
return (dist, pred)
"""
Explanation: Floyd-Warshall All Pairs Shortest Paths
The strongest paths algorithm is a variation of the Floyd-Warshall all pairs shortest paths algorithm.
As with the strongest paths, it finds shortest paths by using more and more nodes as intermediaries.
End of explanation
"""
def read_graph(filename, directed=False):
graph = {}
with open(filename) as input_file:
for line in input_file:
parts = line.split()
if len(parts) != 3:
continue # not a valid line, ignore
[n1, n2, w] = [ int (x) for x in parts ]
if n1 not in graph:
graph[n1] = []
if n2 not in graph:
graph[n2] = []
graph[n1].append((n2, w))
if not directed:
graph[n2].append((n1, w))
return graph
"""
Explanation: We'll use the algorithm on the familiar traffic_grid_graph.txt graph.
<img width="400" src="traffic_grid_graph.png"/>
Here is the function that reads the graph:
End of explanation
"""
g = read_graph('traffic_grid_graph.txt')
pprint.pprint(g)
"""
Explanation: We go ahead and read it:
End of explanation
"""
def adjlist_to_matrix(g):
m = [ [ MAX_INT for j in g.keys() ] for i in g.keys() ]
for u in g.keys():
m[u][u] = 0
for u in g.keys():
for (v, w) in g[u]:
m[u][v] = w
return m
"""
Explanation: Our implementation of the Floyd-Warshall algorithm requires an adjacency matrix as input.
So, we'll use a function that converts the graph from an adjacency list representation to an adjacency matrix one.
End of explanation
"""
m = adjlist_to_matrix(g)
dist, pred = floyd_warshall(m)
for s in sorted(g.keys()):
print('starting node:', s)
print(pred[s])
print(dist[s])
"""
Explanation: We do the conversion, and then we run the Floyd-Warshall algorithm.
End of explanation
"""
for i in range(len(dist)):
dist[i][i] = 0
for s in sorted(g.keys()):
print('starting node:', s)
print(pred[s])
print(dist[s])
"""
Explanation: You may have noticed that the distance of a node to itself has been set to MAX_INT.
If that bothers us and we would like to fix it to zero, that's easy to do:
End of explanation
"""
|
cloudmesh/book | notebooks/machinelearning/precisionrecall.ipynb | apache-2.0 | from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
import numpy as np
"""
Explanation: Precision and Recall
In machine learning models, we have mentioned that there is an important concept called metrics. For classification problems, accuracy is one such metric, but there are other important metrics.
In this exercise, we will evaluate our model with two new metrics: Precision and Recall.
Please answer the questions
To help you understand precision and recall, please answer the questions below by researching and writing in your answers.
Question 1: What is your understanding of these terms: true positive, false positive, true negative, false negative?
Question 2: What are the relationships between those terms and precision or recall?
Please write down your answer as two simple mathematical equations.
Answer: Please double click the cell and input your answer here.
End of explanation
"""
#Let's load iris data again
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Let's split the data to training and testing data.
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Limit to the two first classes, and split into training and test
X_train, X_test, y_train, y_test = train_test_split(X[y < 2], y[y < 2],
test_size=.5,
random_state=random_state)
# Create a simple classifier
classifier = svm.LinearSVC(random_state=random_state)
# How could we fit the model? Please find your solution from our example, and write down your code to fit the SVM model
# on the training data.
# After you have fit the model, we then make predictions.
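# One possible completion of the step described above (a sketch, not the only answer):
classifier.fit(X_train, y_train)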
y_score = classifier.decision_function(X_test)
"""
Explanation: Below is an example of how to get the precision of your model.
Attention: you need to finish one line of code to make the whole example work.
End of explanation
"""
from sklearn.metrics import average_precision_score
average_precision = average_precision_score(y_test, y_score)
print('Average precision-recall score: {0:0.2f}'.format(
average_precision))
"""
Explanation: Get the average precision score by running the cell below.
End of explanation
"""
|
kazzz24/deep-learning | language-translation/dlnd_language_translation.ipynb | mit | """
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
"""
Explanation: Language Translation
In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French.
Get the Data
Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.
End of explanation
"""
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
def single_text_to_ids(text, vocab_to_int, add_EOS):
id_text = []
for sentence in text.split('\n'):
id_sentence = []
for word in sentence.split():
id_sentence.append(vocab_to_int[word])
if add_EOS:
id_sentence.append(vocab_to_int['<EOS>'])
#print(sentence)
#print(id_sentence)
id_text.append(id_sentence)
#print(id_text)
return id_text
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
"""
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
"""
# TODO: Implement Function
#print(source_text)
#print(target_text)
#print(source_vocab_to_int)
#print(target_vocab_to_int)
source_id_text = single_text_to_ids(source_text, source_vocab_to_int, False)
target_id_text = single_text_to_ids(target_text, target_vocab_to_int, True)
return source_id_text, target_id_text
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_text_to_ids(text_to_ids)
"""
Explanation: Implement Preprocessing Function
Text to Word Ids
As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the <EOS> word id at the end of each sentence from target_text. This will help the neural network predict when the sentence should end.
You can get the <EOS> word id by doing:
python
target_vocab_to_int['<EOS>']
You can get other word ids using source_vocab_to_int and target_vocab_to_int.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Check the Version of TensorFlow and Access to GPU
This will check to make sure you have the correct version of TensorFlow and access to a GPU
End of explanation
"""
def model_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate, keep probability)
"""
# TODO: Implement Function
input = tf.placeholder(tf.int32, [None, None], name='input')
targets = tf.placeholder(tf.int32, [None, None], name='targets')
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return input, targets, learning_rate, keep_prob
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_inputs(model_inputs)
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:
- model_inputs
- process_decoding_input
- encoding_layer
- decoding_layer_train
- decoding_layer_infer
- decoding_layer
- seq2seq_model
Input
Implement the model_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
Input text placeholder named "input" using the TF Placeholder name parameter with rank 2.
Targets placeholder with rank 2.
Learning rate placeholder with rank 0.
Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0.
Return the placeholders in the following tuple (Input, Targets, Learning Rate, Keep Probability)
End of explanation
"""
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
"""
Preprocess target data for decoding
:param target_data: Target Placeholder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
"""
# TODO: Implement Function
ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
dec_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)
return dec_input
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_process_decoding_input(process_decoding_input)
"""
Explanation: Process Decoding Input
Implement process_decoding_input using TensorFlow to remove the last word id from each batch in target_data and concat the GO ID to the beginning of each batch.
End of explanation
"""
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
"""
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:return: RNN state
"""
# TODO: Implement Function
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
lstm = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
enc_cell = tf.contrib.rnn.MultiRNNCell([lstm] * num_layers)
_, enc_state = tf.nn.dynamic_rnn(enc_cell, rnn_inputs, dtype=tf.float32)
return enc_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_encoding_layer(encoding_layer)
"""
Explanation: Encoding
Implement encoding_layer() to create an Encoder RNN layer using tf.nn.dynamic_rnn().
End of explanation
"""
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob):
"""
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param sequence_length: Sequence Length
:param decoding_scope: TenorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Train Logits
"""
# TODO: Implement Function
#with tf.variable_scope("decoding") as decoding_scope:
# Training Decoder
train_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state)
dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)
train_pred, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(
dec_cell, train_decoder_fn, dec_embed_input, sequence_length, scope=decoding_scope)
# Apply output function
train_logits = output_fn(train_pred)
return train_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_train(decoding_layer_train)
"""
Explanation: Decoding - Training
Create training logits using tf.contrib.seq2seq.simple_decoder_fn_train() and tf.contrib.seq2seq.dynamic_rnn_decoder(). Apply the output_fn to the tf.contrib.seq2seq.dynamic_rnn_decoder() outputs.
End of explanation
"""
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
"""
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param maximum_length: The maximum allowed time steps to decode
:param vocab_size: Size of vocabulary
:param decoding_scope: TensorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Inference Logits
"""
# TODO: Implement Function
# Inference Decoder
infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference(
output_fn, encoder_state, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size)
inference_logits, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, infer_decoder_fn, scope=decoding_scope)
return inference_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_infer(decoding_layer_infer)
"""
Explanation: Decoding - Inference
Create inference logits using tf.contrib.seq2seq.simple_decoder_fn_inference() and tf.contrib.seq2seq.dynamic_rnn_decoder().
End of explanation
"""
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
num_layers, target_vocab_to_int, keep_prob):
"""
Create decoding layer
:param dec_embed_input: Decoder embedded input
:param dec_embeddings: Decoder embeddings
:param encoder_state: The encoded state
:param vocab_size: Size of vocabulary
:param sequence_length: Sequence Length
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param keep_prob: Dropout keep probability
:return: Tuple of (Training Logits, Inference Logits)
"""
# TODO: Implement Function
# Decoder RNNs
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
lstm = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
dec_cell = tf.contrib.rnn.MultiRNNCell([lstm] * num_layers)
with tf.variable_scope("decoding") as decoding_scope:
# Output Layer
output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
#with tf.variable_scope("decoding") as decoding_scope:
train_logits = decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob)
with tf.variable_scope("decoding", reuse=True) as decoding_scope:
start_of_sequence_id = target_vocab_to_int['<GO>']
end_of_sequence_id = target_vocab_to_int['<EOS>']
maximum_length = sequence_length - 1
inference_logits = decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size, decoding_scope, output_fn, keep_prob)
return train_logits, inference_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer(decoding_layer)
"""
Explanation: Build the Decoding Layer
Implement decoding_layer() to create a Decoder RNN layer.
Create RNN cell for decoding using rnn_size and num_layers.
Create the output function using a lambda to transform its input, logits, to class logits.
Use your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob) function to get the training logits.
Use your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size, decoding_scope, output_fn, keep_prob) function to get the inference logits.
Note: You'll need to use tf.variable_scope to share variables between training and inference.
End of explanation
"""
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
"""
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param sequence_length: Sequence Length
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
:param enc_embedding_size: Encoder embedding size
:param dec_embedding_size: Decoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training Logits, Inference Logits)
"""
# TODO: Implement Function
#Apply embedding to the input data for the encoder.
enc_embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, enc_embedding_size)
#Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob).
enc_state = encoding_layer(enc_embed_input, rnn_size, num_layers, keep_prob)
#Process target data using your process_decoding_input(target_data, target_vocab_to_int, batch_size) function.
dec_input = process_decoding_input(target_data, target_vocab_to_int, batch_size)
#Apply embedding to the target data for the decoder.
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
#Decode the encoded input using your decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob).
train_logits, inference_logits = decoding_layer(dec_embed_input, dec_embeddings, enc_state, target_vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob)
return train_logits, inference_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_seq2seq_model(seq2seq_model)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
Apply embedding to the input data for the encoder.
Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob).
Process target data using your process_decoding_input(target_data, target_vocab_to_int, batch_size) function.
Apply embedding to the target data for the decoder.
Decode the encoded input using your decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob).
End of explanation
"""
# Number of Epochs
epochs = 20
# Batch Size
batch_size = 512
# RNN Size
rnn_size = 128
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 200
decoding_embedding_size = 200
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.5
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set num_layers to the number of layers.
Set encoding_embedding_size to the size of the embedding for the encoder.
Set decoding_embedding_size to the size of the embedding for the decoder.
Set learning_rate to the learning rate.
Set keep_probability to the Dropout keep probability
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(
tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import time
def get_accuracy(target, logits):
"""
Calculate accuracy
"""
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1]), (0,0)],
'constant')
return np.mean(np.equal(target, np.argmax(logits, 2)))
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
end_time = time.time()
if batch_i % 10 == 0:
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params(save_path)
"""
Explanation: Save Parameters
Save the batch_size and save_path parameters for inference.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def sentence_to_seq(sentence, vocab_to_int):
"""
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
"""
# TODO: Implement Function
lower_sentence = sentence.lower()
id_seq = []
for word in lower_sentence.split():
id_seq.append(vocab_to_int.get(word, vocab_to_int['<UNK>']))
return id_seq
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_sentence_to_seq(sentence_to_seq)
"""
Explanation: Sentence to Sequence
To feed a sentence into the model for translation, you first need to preprocess it. Implement the function sentence_to_seq() to preprocess new sentences.
Convert the sentence to lowercase
Convert words into ids using vocab_to_int
Convert words not in the vocabulary, to the <UNK> word id.
End of explanation
"""
translate_sentence = 'he saw a old yellow truck .'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
"""
Explanation: Translate
This will translate translate_sentence from English to French.
End of explanation
"""
|
spulido99/NetworksAnalysis | DiderGonzalez/Ejercicios 1.4/Ejercicios 1.4 Power Law & Scale Free Networks.ipynb | mit | import numpy as np
from scipy.stats import powerlaw
import scipy as sp
import seaborn as sns
sns.set()
%matplotlib inline
edges =[]
for line in open('facebook_combined.txt'):
if line[0] != '#':
# print(line.replace('\n','').split(' ')) # \n is a line break, .split(' ') -> split on spaces, .split('\t') -> if separated by tabs
d = line.replace('\n','').split(' ')
edges.append((d[0],d[1]))
#print(edges)
import networkx as nx
G = nx.Graph() # G = nx.Graph(edges) is another way to create the graph
G.add_edges_from(edges)
degrees = [degree for _, degree in G.degree().items()]
# print(degrees)
sns.distplot(degrees)
e, l, s = sp.stats.powerlaw.fit(degrees)
e, l, s
ax = sns.distplot(sp.stats.powerlaw.rvs(e, l, s, size=100000))
import matplotlib.pyplot as plt
log = np.log(degrees)
plt.hist(log, log=True)
"""
Explanation: Power Law & Scale-Free Network Exercises
Power Law Distribution
Plot, on a log-log scale, the degree distribution of a scale-free network (a real or generated network).
Compute the exponent of the plotted power law.
End of explanation
"""
# r is assumed to be the degree sequence of the simulated citation network
# (see the simulation sketch below); fit a power law to its distribution.
a, l, s = sp.stats.powerlaw.fit(r)
(a, l, s)
"""
Explanation: Generative Network Model
Citation Network Simulation
Define a number of papers N. The papers will be published in order (first 1, then 2, etc.). Define the average number of citations per paper (e.g. 3 citations). Create an initial group of papers. Each new paper will make c citations. Those citations go to already existing papers, with probability proportional to the number of papers citing each paper.
Were hubs created? What is the degree distribution of the network?
End of explanation
"""
|
wegamekinglc/Finance-Python | ipynb/Presentation for Analysis.ipynb | mit | from PyFin.Math.Accumulators import Latest
exp1 = Latest('x')
exp1
"""
Explanation: Finance-Python
Original project address: Finance-Python (https://github.com/wegamekinglc/Finance-Python);
python setup.py install, or
pip install finance-python
See the requirements folder in the project root for the related dependencies.
Operator in Declarative Style
Declarative style
Computation expressions are abstracted into operators; the user does not need to spell out the computation procedure, only to express the idea with these operators.
For example, to compute the mean of a vector $\bar x$, an imperative approach might look like this:
python
sum_value = 0
for v in x:
    sum_value += v
average_value = sum_value / len(x)
whereas the declarative approach is:
python
average_value = mean(x)
The mean above is what we call an operator.
Lazy evaluation
An expression does not need to produce its result where it is defined; it is evaluated only when the value is needed:
```python
expression = operator(dependency)
```
Some other computation code may run in between:
......
Here expression is an expression object, not the result of the computation. When the value is needed:
```python
expression_value = expression.value
```
Arithmetic operations are supported
Expression objects support the basic +-*/ operations.
For example, to compute the Sharpe ratio of a return series, we can define the expression like this:
```python
sharp_expression = mean(x) / std(x)
```
Here sharp_expression is a new expression, built from two basic expressions.
Compound operations are supported
Operators can be composed. For example, to compute the 20-day standard deviation of the daily 50-day moving average:
```python
compounded_expression = std(mean(x, 50), 20)
```
Implementation in Finance-Python
In Finance-Python, the declarative-style operators above are implemented as accumulators. An accumulator is an operator that carries its own state.
Accumulator
Hello World
The following example uses the Latest operator, which keeps the most recent input value.
End of explanation
"""
# 1st round
exp1.push({'x': 1})
print("Value after 1st round: {0}".format(exp1.value))
# 2nd round
exp1.push({'x': 2})
print("Value after 2nd round: {0}".format(exp1.value))
# repeat
print("Do nothing: {0}".format(exp1.value))
# 3rd and 4th round
exp1.push({'x': 3})
exp1.push({'x': 4})
print("Value after 3rd/4th round: {0}".format(exp1.value))
"""
Explanation: Above we can see that exp1 is an instance of an accumulator.
End of explanation
"""
from PyFin.Math.Accumulators import MovingAverage
ma = MovingAverage(x='x', window=2)
values = [1, 2, 3, 4, 5]
for i, x in enumerate(values):
ma.push({'x': x})
print("{0}: {1}".format(i, ma.value))
"""
Explanation: One Step Further
The following example computes the mean of the last two input values:
End of explanation
"""
import numpy as np
from PyFin.Math.Accumulators import MovingVariance
from PyFin.Math.Accumulators import Sqrt
np.random.seed(47)
ret_simulated = 0.0005 + np.random.randn(2000) / 100.
ret_mean = MovingAverage(x='x', window=250)
ret_std = Sqrt(MovingVariance(x='x', window=250)) # Compounded accumulator is used here
sharp = ret_mean / ret_std # dividing can be used for accumulators
sharp
"""
Explanation: More complicated examples
Arithmetic / compound operations
Compute the rolling Sharpe ratio of a return series with a 250-day window.
Construct the operators:
End of explanation
"""
sharp_series = []
for ret in ret_simulated:
sharp.push({'x': ret})
sharp_series.append(sharp.value)
"""
Explanation: Feed in the input data:
End of explanation
"""
%matplotlib inline
import pandas as pd
df = pd.DataFrame({'returns': ret_simulated.cumsum(), 'sharp': sharp_series})
df[250:].plot(secondary_y='sharp', figsize=(12, 6))
"""
Explanation: Plot the data:
End of explanation
"""
from PyFin.Math.Accumulators import MovingCorrelation
ma20 = MovingAverage(x='x', window=20)
ma50 = MovingAverage(x='x', window=50)
corr = MovingCorrelation(window=250, x=ma20, y=ma50)
corr
ma20_series = []
ma50_series = []
corr_series = []
for ret in ret_simulated:
ma20.push({'x': ret})
ma50.push({'x': ret})
corr.push({'x': ret})
ma20_series.append(ma20.value)
ma50_series.append(ma50.value)
corr_series.append(corr.value)
df = pd.DataFrame({'ma20': ma20_series, 'ma50': ma50_series, 'corr': corr_series})
df[300:].plot(secondary_y='corr', figsize=(12, 6))
"""
Explanation: Compound operations
In the following example, we compute the 250-day correlation between a series' 20-day and 50-day moving averages:
End of explanation
"""
from PyFin.api import MA
from PyFin.examples.datas import sample_data
sample_data
ma2 = MA(2, 'close')
"""
Explanation: Working with Pandas
End of explanation
"""
ma2.transform(sample_data, category_field='code')
"""
Explanation: Compute grouped by category_field
End of explanation
"""
ma2 = MA(2, 'close')
ma2.transform(sample_data)
"""
Explanation: Direct calculation
End of explanation
"""
|
altair-viz/altair_parser | JSONSchemaNotes.ipynb | bsd-3-clause | import json
import jsonschema
simple_schema = {
"type": "object",
"properties": {
"foo": {"type": "string"},
"bar": {"type": "number"}
}
}
good_instance = {
"foo": "hello world",
"bar": 3.141592653,
}
bad_instance = {
"foo" : 42,
"bar" : "string"
}
# Should succeed
jsonschema.validate(good_instance, simple_schema)
# Should fail
try:
jsonschema.validate(bad_instance, simple_schema)
except jsonschema.ValidationError as err:
print(err)
"""
Explanation: JSON Schema Parser
Notes on the JSON schema / Traitlets package
Goal: write a function that, given a JSON Schema, will generate code for traitlets objects which provide equivalent validation.
Links
JSON Schema Validation Information
jsonschema Python package
Altair 1.0 parsing code
By-Hand Example
First confirm that we're doing things correctly with the jsonschema package:
End of explanation
"""
import traitlets as T
class SimpleInstance(T.HasTraits):
foo = T.Unicode()
bar = T.Float()
# Should succeed
SimpleInstance(**good_instance)
# Should fail
try:
SimpleInstance(**bad_instance)
except T.TraitError as err:
print(err)
"""
Explanation: OK, now let's write a traitlets class that does the same thing:
End of explanation
"""
import jinja2
OBJECT_TEMPLATE = """
{%- for import in cls.imports %}
{{ import }}
{%- endfor %}
class {{ cls.classname }}({{ cls.baseclass }}):
{%- for (name, prop) in cls.properties.items() %}
{{ name }} = {{ prop.trait_code }}
{%- endfor %}
"""
class JSONSchema(object):
"""A class to wrap JSON Schema objects and reason about their contents"""
object_template = OBJECT_TEMPLATE
def __init__(self, schema, root=None):
self.schema = schema
self.root = root or schema
@property
def type(self):
# TODO: should the default type be considered object?
return self.schema.get('type', 'object')
@property
def trait_code(self):
type_dict = {'string': 'T.Unicode()',
'number': 'T.Float()',
'integer': 'T.Integer()',
'boolean': 'T.Bool()'}
if self.type not in type_dict:
raise NotImplementedError()
return type_dict[self.type]
@property
def classname(self):
# TODO: deal with non-root schemas somehow...
if self.schema is self.root:
return "RootInstance"
else:
raise NotImplementedError("Non-root object schema")
@property
def baseclass(self):
return "T.HasTraits"
@property
def imports(self):
return ["import traitlets as T"]
@property
def properties(self):
return {key: JSONSchema(val) for key, val in self.schema.get('properties', {}).items()}
def object_code(self):
return jinja2.Template(self.object_template).render(cls=self)
"""
Explanation: Roadmap
Start by recognizing all simple JSON types in the schema ("string", "number", "integer", "boolean", "null")
Next recognize objects containing simple types
Next recognize compound simple types (i.e. where type is a list of simple types)
Next recognize arrays & enums
Next recognize "$ref" definitions
Next recognize "anyOf", "oneOf", "allOf" definitions... first is essentially a traitlets Union, second is a Union where only one must match, and "allOf" is essentially a composite object (not sure if traitlets has that...) Note that among these, Vega-Lite only uses "anyOf"
Catalog all validation keywords... Implement custom traitlets that support all the various validation keywords for each type. (Validation keywords listed here)
Use hypothesis for testing?
Challenges & Questions to think about
JSONSchema ignores any keys/properties which are not listed in the schema. Traitlets warns, and in the future will raise an error for undefined keys/properties
this may be OK... we can just document the fact that traitlets is more prescriptive than JSONschema
JSON allows undefined values as well as explicit nulls, which map to None. Traitlets treats None as undefined. How to resolve this?
Best option is probably to use an undefined sentinel within the traitlets structure, such that the code knows when to ignore keys & produces dicts which translate directly to the correct JSON
Will probably need to define some custom trait types, e.g. Null, and also extend simple trait types to allow for the more extensive validations allowed in JSON Schema.
Generate subclasses with the code
What version of the schema should we target? Perhaps try to target multiple versions?
start with 04 because this is what's supported by jsonschema and used by Vega(-Lite)
Ideas
Easiest way: validate everything with a single HasTraits class via jsonschema, splitting out properties into traitlets
Interface
root schema and all definitions should become their own T.HasTraits class
Objects defined inline should also have their own class with a generated anonymous name
Use Jinja templating; allow output to one file or multiple files with relative imports
root object must have type="object"... this differs from jsonschema
Testing
test cases should be an increasingly complicated set of jsonschema objects, with test cases that should pass and fail. Perhaps store these in a JSON structure? (With a schema?)
An initial prototype
Let's try generating some traitlets classes for simple cases
End of explanation
"""
code = JSONSchema(simple_schema).object_code()
print(code)
"""
Explanation: Trying it out...
End of explanation
"""
exec(code) # defines RootInstance
# Good instance should validate correctly
RootInstance(**good_instance)
# Bad instance should raise a TraitError
try:
RootInstance(**bad_instance)
except T.TraitError as err:
print(err)
"""
Explanation: Testing the result
End of explanation
"""
|
tedunderwood/horizon | chapter1/notebooks/chapter1figs1and2.ipynb | mit | #!/usr/bin/env python3
import csv, os, sys
from collections import Counter
# import utils
sys.path.append('../../lib')
import SonicScrewdriver as utils
import FileCabinet as filecab
# start by loading the hard seeds
colors = set()
with open('../lexicons/colors.txt', encoding = 'utf-8') as f:
for line in f:
colors.add(line.strip())
logistic = dict()
realclass = dict()
titles = dict()
dates = dict()
with open('../metadata/prestigeset.csv', encoding = 'utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
logistic[row['volid']] = float(row['logistic'])
realclass[row['volid']] = row['prestige']
titles[row['volid']] = row['title']
dates[row['volid']] = int(row['dateused'])
sourcedir = '../sourcefiles/'
documents = filecab.get_wordcounts(sourcedir, '.tsv', set(logistic))
outrows = []
for docid, doc in documents.items():
if docid not in logistic:
continue
else:
allwords = 1
colorct = 0
for word, count in doc.items():
allwords += count
if word in colors:
colorct += count
outline = [docid, realclass[docid], logistic[docid], (colorct/allwords), dates[docid], titles[docid]]
outrows.append(outline)
fields = ['docid', 'class', 'logistic', 'colors', 'date', 'title']
with open('../plotdata/colorfic.csv', mode = 'w', encoding = 'utf-8') as f:
writer = csv.writer(f)
writer.writerow(fields)
for row in outrows:
writer.writerow(row)
"""
Explanation: Chapter 1, figures 1 and 2
The notebooks included in this repository are intended to show you how raw data was transformed into a particular table or graph.
The graphs may not look exactly like the published versions, because those were created in a different language (R). But they should be substantively equivalent.
Figure 1.1
Graphing the frequency of color vocabulary in a subset of volumes.
The list of words we count as "color words" is contained in colors.txt. The list of volumes to be plotted is contained in prestigeset.csv. That file actually contains a range of volumes, not all of which are particularly prestigious! Its name comes from the fact that it does record, in one column, whether the volume was included in a list of prestigious/reviewed volumes. (For more on the source of that list, see chapter 3).
Counting the frequency of color words
The code below counts words and creates a data file, colorfic.csv.
End of explanation
"""
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
"""
Explanation: Loading the data we just created as a data frame
It would have been more elegant to create a data frame in memory, instead of writing the data to file as an intermediary step, and then reading it back in.
But that's not how I originally wrote the process, and rewriting several years of code for pure elegance would be a bit extravagant. So having written the data out, let's read it back in.
End of explanation
"""
color_df = pd.read_csv('../plotdata/colorfic.csv')
color_df.head()
"""
Explanation: We can take a look at what is actually in the data frame.
End of explanation
"""
groups = color_df.groupby('class')
groupnames = {0: 'unmarked', 1: 'reviewed'}
groupcolors = {0: 'k', 1: 'r'}
fig, ax = plt.subplots(figsize = (9, 9))
ax.margins(0.05)
for code, group in groups:
ax.plot(group.date, group.colors, marker='o', linestyle='', ms=6, color = groupcolors[code], label=groupnames[code])
ax.legend(numpoints = 1, loc = 'upper left')
plt.show()
"""
Explanation: Visualizing the data
I'll use color to distinguish reviewed volumes from those not marked as reviewed in elite journals. (We don't actually know that they weren't ever reviewed.)
End of explanation
"""
post1800 = color_df[color_df.date > 1800]
groups = post1800.groupby('class')
groups.aggregate(np.mean)
"""
Explanation: other analysis, not in the book
Is there any difference between the frequency of color words in reviewed volumes and others? Let's focus on the volumes after 1800.
End of explanation
"""
from scipy.stats import ttest_ind
ttest_ind(color_df[color_df['class'] == 1].colors, color_df[(color_df['class'] == 0) & (color_df['date'] > 1800)].colors, equal_var = False)
"""
Explanation: I guess there is a really slight difference in the "colors" column. Reviewed works refer to colors a little more often. (Ignore the "logistic" column for now, it's inherited from a different process.) But is the difference in frequency of color words significant?
End of explanation
"""
stanford = set()
with open('../lexicons/stanford.csv', encoding = 'utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
if row['class'] == 'hard':
stanford.add(row['word'])
sourcedir = '../sourcefiles/'
pairedpaths = filecab.get_pairedpaths(sourcedir, '.tsv')
docids = [x[0] for x in pairedpaths]
wordcounts = filecab.get_wordcounts(sourcedir, '.tsv', docids)
metapath = '../metadata/allgenremeta.csv'
genredict = dict()
datedict = dict()
with open(metapath, encoding = 'utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
date = int(row['firstpub'])
genre = row['tags']
docid = row['docid']
if date not in datedict:
datedict[date] = []
datedict[date].append(docid)
genredict[docid] = genre
possible_genres = {'fic', 'bio'}
allcounts = dict()
hardseedcounts = dict()
for genre in possible_genres:
allcounts[genre] = Counter()
hardseedcounts[genre] = Counter()
for i in range(1700,2000):
if i in datedict:
candidates = datedict[i]
for anid in candidates:
genre = genredict[anid]
if genre not in possible_genres:
continue
if anid not in wordcounts:
print('error')
continue
else:
for word, count in wordcounts[anid].items():
allcounts[genre][i] += count
if word in stanford:
hardseedcounts[genre][i] += count
with open('../plotdata/hardaverages.csv', mode = 'w', encoding = 'utf-8') as f:
f.write('genre,year,hardpct\n')
for genre in possible_genres:
for i in range(1700,2000):
if i in allcounts[genre]:
pct = hardseedcounts[genre][i] / (allcounts[genre][i] + 1)
f.write(genre + ',' + str(i) + ',' + str(pct) + '\n')
"""
Explanation: No. That's not a significant result; there doesn't seem to be any meaningful difference between reviewed and unreviewed books, at least not at this scale of analysis.
Figure 1.2
Now let's calculate the frequency of Stanford "hard seeds" in biography and fiction, aggregating by year.
count the "hard seeds"
End of explanation
"""
hard_df = pd.read_csv('../plotdata/hardaverages.csv')
hard_df.head()
"""
Explanation: look at the data we created
End of explanation
"""
groups = hard_df.groupby('genre')
groupcolors = {'bio': 'k', 'fic': 'r', 'poe': 'g'}
fig, ax = plt.subplots(figsize = (9, 9))
ax.margins(0.05)
for code, group in groups:
if code == 'poe':
continue
ax.plot(group.year, group.hardpct, marker='o', linestyle='', ms=6, color = groupcolors[code], label=code)
ax.legend(numpoints = 1, loc = 'upper left')
plt.show()
"""
Explanation: now plot the yearly averages for biography and fiction
End of explanation
"""
|
harper/dlnd_thirdproject | tv-script-generation/dlnd_tv_script_generation.ipynb | mit | """
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
"""
Explanation: TV Script Generation
In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.
Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
End of explanation
"""
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
"""
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
"""
vocab_to_int = {}
int_to_vocab = {}
distinct_words = set(text)
for index, word in enumerate(distinct_words):
vocab_to_int[word] = index
int_to_vocab[index] = word
return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
"""
Explanation: Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call vocab_to_int
- Dictionary to go from the id to word, we'll call int_to_vocab
Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab)
End of explanation
"""
def token_lookup():
"""
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
"""
punctuation_dictionary = {}
punctuation_dictionary["."] = "||Period||"
punctuation_dictionary[","] = "||Comma||"
punctuation_dictionary["\""] = "||Quotation_Mark||"
punctuation_dictionary[";"] = "||Semicolon||"
punctuation_dictionary["!"] = "||Exclamation_Mark||"
punctuation_dictionary["?"] = "||Question_Mark||"
punctuation_dictionary["("] = "||Left_Parentheses||"
punctuation_dictionary[")"] = "||Right_Parentheses||"
punctuation_dictionary["--"] = "||Dash||"
punctuation_dictionary["\n"] = "||Return||"
return punctuation_dictionary
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
"""
Explanation: Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuation like periods and exclamation marks makes it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( . )
- Comma ( , )
- Quotation Mark ( " )
- Semicolon ( ; )
- Exclamation mark ( ! )
- Question mark ( ? )
- Left Parentheses ( ( )
- Right Parentheses ( ) )
- Dash ( -- )
- Return ( \n )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around them. This separates each symbol into its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused with a word. Instead of using the token "dash", try using something like "||dash||".
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below:
- get_inputs
- get_init_cell
- get_embed
- build_rnn
- build_nn
- get_batches
Check the Version of TensorFlow and Access to GPU
End of explanation
"""
def get_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
"""
input_placeholder = tf.placeholder(tf.int32, [None, None], name="input")
targets_placeholder = tf.placeholder(tf.int32, [None, None])
learning_rate_placeholder = tf.placeholder(tf.float32)
return input_placeholder, targets_placeholder, learning_rate_placeholder
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
"""
Explanation: Input
Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the TF Placeholder name parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple (Input, Targets, LearningRate)
End of explanation
"""
def get_init_cell(batch_size, rnn_size):
"""
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
"""
lstm_cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
rnn_cell = tf.contrib.rnn.MultiRNNCell([lstm_cell] * 1)
rnn_initial_state = tf.identity(rnn_cell.zero_state(batch_size, tf.float32),"initial_state")
return rnn_cell, rnn_initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
"""
Explanation: Build RNN Cell and Initialize
Stack one or more BasicLSTMCells in a MultiRNNCell.
- The RNN size should be set using rnn_size
- Initialize Cell State using the MultiRNNCell's zero_state() function
- Apply the name "initial_state" to the initial state using tf.identity()
Return the cell and initial state in the following tuple (Cell, InitialState)
End of explanation
"""
def get_embed(input_data, vocab_size, embed_dim):
"""
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
"""
embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, embed_dim], -1, 1))
return tf.nn.embedding_lookup(embedding_matrix, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
"""
Explanation: Word Embedding
Apply embedding to input_data using TensorFlow. Return the embedded sequence.
End of explanation
"""
def build_rnn(cell, inputs):
"""
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
"""
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = tf.identity(final_state, "final_state")
return outputs, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
"""
Explanation: Build RNN
You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN.
- Build the RNN using the tf.nn.dynamic_rnn()
- Apply the name "final_state" to the final state using tf.identity()
Return the outputs and final_state state in the following tuple (Outputs, FinalState)
End of explanation
"""
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
"""
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)
"""
embeddings = get_embed(input_data, vocab_size, embed_dim)
outputs, final_state = build_rnn(cell, embeddings)
logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)
return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.
- Build RNN using cell and your build_rnn(cell, inputs) function.
- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState)
End of explanation
"""
def get_batches(int_text, batch_size, seq_length):
"""
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
"""
full_batch_size = batch_size * seq_length
number_of_batches = (len(int_text) - 1) // full_batch_size
input_ints = int_text[:number_of_batches * full_batch_size]
target_ints = int_text[1:number_of_batches * full_batch_size + 1]
count = number_of_batches * batch_size
batches = []
for i in range(0, number_of_batches):
input_batch = []
target_batch = []
batches.append([input_batch, target_batch])
batch_counter = 0
for x in range(0,count):
batches[batch_counter][0].append(input_ints[x * seq_length:x * seq_length + seq_length])
batches[batch_counter][1].append(target_ints[x * seq_length:x * seq_length + seq_length])
batch_counter = batch_counter + 1
if batch_counter >= number_of_batches:
batch_counter = 0
batches = np.asarray(batches)
return batches
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
print(get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3))
"""
Explanation: Batches
Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements:
- The first element is a single batch of input with the shape [batch size, sequence length]
- The second element is a single batch of targets with the shape [batch size, sequence length]
If you can't fill the last batch with enough data, drop the last batch.
For example, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3) would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2 3], [ 7 8 9]],
# Batch of targets
[[ 2 3 4], [ 8 9 10]]
],
# Second Batch
[
# Batch of Input
[[ 4 5 6], [10 11 12]],
# Batch of targets
[[ 5 6 7], [11 12 13]]
]
]
```
End of explanation
"""
# Number of Epochs
num_epochs = 120
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 100
# Embedding Dimension Size
embed_dim = 300
# Sequence Length
seq_length = 20
# Learning Rate
learning_rate = .01
# Show stats for every n number of batches
show_every_n_batches = 10
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set num_epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set embed_dim to the size of the embedding.
Set seq_length to the length of sequence.
Set learning_rate to the learning rate.
Set show_every_n_batches to the number of batches the neural network should print progress.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
"""
Explanation: Save Parameters
Save seq_length and save_dir for generating a new TV script.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def get_tensors(loaded_graph):
"""
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
"""
input_tensor = loaded_graph.get_tensor_by_name("input:0")
initial_state_tensor = loaded_graph.get_tensor_by_name("initial_state:0")
final_state_tensor = loaded_graph.get_tensor_by_name("final_state:0")
probs_tensor = loaded_graph.get_tensor_by_name("probs:0")
return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
"""
Explanation: Implement Generate Functions
Get Tensors
Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
End of explanation
"""
def pick_word(probabilities, int_to_vocab):
"""
Pick the next word in the generated text
:param probabilities: Probabilities of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
"""
index = probabilities.argmax()
return int_to_vocab[index]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
"""
Explanation: Choose Word
Implement the pick_word() function to select the next word using probabilities.
End of explanation
"""
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
"""
Explanation: Generate TV Script
This will generate the TV script for you. Set gen_length to the length of TV script you want to generate.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/mri/cmip6/models/sandbox-1/ocnbgchem.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mri', 'sandbox-1', 'ocnbgchem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Ocnbgchem
MIP Era: CMIP6
Institute: MRI
Source ID: SANDBOX-1
Topic: Ocnbgchem
Sub-Topics: Tracers.
Properties: 65 (37 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:19
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
4. Key Properties --> Transport Scheme
5. Key Properties --> Boundary Forcing
6. Key Properties --> Gas Exchange
7. Key Properties --> Carbon Chemistry
8. Tracers
9. Tracers --> Ecosystem
10. Tracers --> Ecosystem --> Phytoplankton
11. Tracers --> Ecosystem --> Zooplankton
12. Tracers --> Disolved Organic Matter
13. Tracers --> Particules
14. Tracers --> Dic Alkalinity
1. Key Properties
Ocean Biogeochemistry key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean biogeochemistry model code (PISCES 2.0,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Geochemical"
# "NPZD"
# "PFT"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Fixed"
# "Variable"
# "Mix of both"
# TODO - please enter value(s)
"""
Explanation: 1.4. Elemental Stoichiometry
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe elemental stoichiometry (fixed, variable, mix of the two)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Elemental Stoichiometry Details
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe which elements have fixed/variable stoichiometry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
List of all prognostic tracer variables in the ocean biogeochemistry component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Diagnostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
List of all diagnostic tracer variables in the ocean biogeochemistry component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.damping')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Damping
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any tracer damping used (such as artificial correction or relaxation to climatology,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Time stepping method for passive tracers transport in ocean biogeochemistry
2.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time stepping framework for passive tracers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.2. Timestep If Not From Ocean
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Time step for passive tracers (if different from ocean)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Time stepping framework for biology sources and sinks in ocean biogeochemistry
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time stepping framework for biology sources and sinks
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep If Not From Ocean
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Time step for biology sources and sinks (if different from ocean)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Transport Scheme
Transport scheme in ocean biogeochemistry
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of transport scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Use that of ocean model"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 4.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Transport scheme used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Use Different Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe transport scheme if different from that of ocean model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Atmospheric Chemistry model"
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Boundary Forcing
Properties of biogeochemistry boundary forcing
5.1. Atmospheric Deposition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how atmospheric deposition is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Land Surface model"
# TODO - please enter value(s)
"""
Explanation: 5.2. River Input
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how river input is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Sediments From Boundary Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
List which sediments are specified from boundary conditions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Sediments From Explicit Model
Is Required: FALSE Type: STRING Cardinality: 0.1
List which sediments are specified from explicit sediment model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Gas Exchange
*Properties of gas exchange in ocean biogeochemistry*
6.1. CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.2. CO2 Exchange Type
Is Required: FALSE Type: ENUM Cardinality: 0.1
Describe CO2 gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.3. O2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is O2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. O2 Exchange Type
Is Required: FALSE Type: ENUM Cardinality: 0.1
Describe O2 gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.5. DMS Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is DMS gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.6. DMS Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify DMS gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.7. N2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is N2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.8. N2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify N2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.9. N2O Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is N2O gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.10. N2O Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify N2O gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.11. CFC11 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CFC11 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.12. CFC11 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify CFC11 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.13. CFC12 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CFC12 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.14. CFC12 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify CFC12 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.15. SF6 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is SF6 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.16. SF6 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify SF6 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.17. 13CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is 13CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.18. 13CO2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify 13CO2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.19. 14CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is 14CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.20. 14CO2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify 14CO2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.21. Other Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any other gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other protocol"
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Carbon Chemistry
Properties of carbon chemistry biogeochemistry
7.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how carbon chemistry is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea water"
# "Free"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7.2. PH Scale
Is Required: FALSE Type: ENUM Cardinality: 0.1
If NOT OMIP protocol, describe pH scale.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Constants If Not OMIP
Is Required: FALSE Type: STRING Cardinality: 0.1
If NOT OMIP protocol, list carbon chemistry constants.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Tracers
Ocean biogeochemistry tracers
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of tracers in ocean biogeochemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Sulfur Cycle Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sulfur cycle modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrogen (N)"
# "Phosphorous (P)"
# "Silicium (S)"
# "Iron (Fe)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Nutrients Present
Is Required: TRUE Type: ENUM Cardinality: 1.N
List nutrient species present in ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrates (NO3)"
# "Amonium (NH4)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.4. Nitrous Species If N
Is Required: FALSE Type: ENUM Cardinality: 0.N
If nitrogen present, list nitrous species.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dentrification"
# "N fixation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.5. Nitrous Processes If N
Is Required: FALSE Type: ENUM Cardinality: 0.N
If nitrogen present, list nitrous processes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Tracers --> Ecosystem
Ecosystem properties in ocean biogeochemistry
9.1. Upper Trophic Levels Definition
Is Required: TRUE Type: STRING Cardinality: 1.1
Definition of upper trophic level (e.g. based on size) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Upper Trophic Levels Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Define how upper trophic level are treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "PFT including size based (specify both below)"
# "Size based only (specify below)"
# "PFT only (specify below)"
# TODO - please enter value(s)
"""
Explanation: 10. Tracers --> Ecosystem --> Phytoplankton
Phytoplankton properties in ocean biogeochemistry
10.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of phytoplankton
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diatoms"
# "Nfixers"
# "Calcifiers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Pft
Is Required: FALSE Type: ENUM Cardinality: 0.N
Phytoplankton functional types (PFT) (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microphytoplankton"
# "Nanophytoplankton"
# "Picophytoplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Size Classes
Is Required: FALSE Type: ENUM Cardinality: 0.N
Phytoplankton size classes (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "Size based (specify below)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Tracers --> Ecosystem --> Zooplankton
Zooplankton properties in ocean biogeochemistry
11.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of zooplankton
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microzooplankton"
# "Mesozooplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Size Classes
Is Required: FALSE Type: ENUM Cardinality: 0.N
Zooplankton size classes (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Tracers --> Disolved Organic Matter
Disolved organic matter properties in ocean biogeochemistry
12.1. Bacteria Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there bacteria representation ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Labile"
# "Semi-labile"
# "Refractory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Lability
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe treatment of lability in dissolved organic matter
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diagnostic"
# "Diagnostic (Martin profile)"
# "Diagnostic (Balast)"
# "Prognostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Tracers --> Particules
Particulate carbon properties in ocean biogeochemistry
13.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is particulate carbon represented in ocean biogeochemistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "POC"
# "PIC (calcite)"
# "PIC (aragonite"
# "BSi"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, type(s) of particulate matter taken into account
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No size spectrum used"
# "Full size spectrum"
# "Discrete size classes (specify which below)"
# TODO - please enter value(s)
"""
Explanation: 13.3. Size If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe if a particule size spectrum is used to represent distribution of particules in water volume
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13.4. Size If Discrete
Is Required: FALSE Type: STRING Cardinality: 0.1
If prognostic and discrete size, describe which size classes are used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Function of particule size"
# "Function of particule type (balast)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Sinking Speed If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, method for calculation of sinking speed of particules
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "C13"
# "C14)"
# TODO - please enter value(s)
"""
Explanation: 14. Tracers --> Dic Alkalinity
DIC and alkalinity properties in ocean biogeochemistry
14.1. Carbon Isotopes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which carbon isotopes are modelled (C13, C14)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.2. Abiotic Carbon
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is abiotic carbon modelled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Prognostic"
# "Diagnostic)"
# TODO - please enter value(s)
"""
Explanation: 14.3. Alkalinity
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is alkalinity modelled ?
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/time_series_prediction/solutions/4_modeling_keras.ipynb | apache-2.0 | import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from google.cloud import bigquery
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense, DenseFeatures,
Conv1D, MaxPool1D,
Reshape, RNN,
LSTM, GRU, Bidirectional)
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
# To plot pretty figures
%matplotlib inline
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# For reproducible results.
from numpy.random import seed
seed(1)
tf.random.set_seed(2)
PROJECT = "your-gcp-project-here" # REPLACE WITH YOUR PROJECT NAME
BUCKET = "your-gcp-bucket-here" # REPLACE WITH YOUR BUCKET
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
%env
PROJECT = PROJECT
BUCKET = BUCKET
REGION = REGION
"""
Explanation: Time Series Prediction
Objectives
1. Build a linear, DNN and CNN model in keras to predict stock market behavior.
2. Build a simple RNN model and a multi-layer RNN model in keras.
3. Combine RNN and CNN architecture to create a keras model to predict stock market behavior.
In this lab we will build a custom Keras model to predict stock market behavior using the stock market dataset we created in the previous labs. We'll start with a linear, DNN and CNN model
Note: The data was created in the previous labs. If you haven't run those notebooks, go back to optional_1_data_exploration.ipynb and optional_2_feature_engineering.ipynb and run them to create the required data.
Since the features of our model are sequential in nature, we'll next look at how to build various RNN models in keras. We'll start with a simple RNN model and then see how to create a multi-layer RNN in keras. We'll also see how to combine features of 1-dimensional CNNs with a typical RNN architecture.
We will be exploring a lot of different model types in this notebook. To keep track of your results, record the accuracy on the validation set in the table here. In machine learning there are rarely any "one-size-fits-all" solutions, so feel free to test out different hyperparameters (e.g. train steps, regularization, learning rates, optimizers, batch size) for each of the models. Keep track of your model performance in the chart below.
| Model | Validation Accuracy |
|----------|:---------------:|
| Baseline | 0.295 |
| Linear | -- |
| DNN | -- |
| 1-d CNN | -- |
| simple RNN | -- |
| multi-layer RNN | -- |
| RNN using CNN features | -- |
| CNN using RNN features | -- |
Load necessary libraries and set up environment variables
End of explanation
"""
%%time
bq = bigquery.Client(project=PROJECT)
bq_query = '''
#standardSQL
SELECT
symbol,
Date,
direction,
close_values_prior_260
FROM
`stock_market.eps_percent_change_sp500`
LIMIT
100
'''
df_stock_raw = bq.query(bq_query).to_dataframe()
df_stock_raw.head()
"""
Explanation: Explore time series data
We'll start by pulling a small sample of the time series data from BigQuery and write some helper functions to clean up the data for modeling. We'll use the data from the eps_percent_change_sp500 table in BigQuery. The close_values_prior_260 column contains the close values for any given stock for the previous 260 days.
End of explanation
"""
def clean_data(input_df):
"""Cleans data to prepare for training.
Args:
input_df: Pandas dataframe.
Returns:
Pandas dataframe.
"""
df = input_df.copy()
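    # The accompanying markdown also mentions dropping any inf/NA values; a
    # minimal way to do that (an addition, not in the original cell) would be:
    # df = df.replace([np.inf, -np.inf], np.nan).dropna()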
# TF doesn't accept datetimes in DataFrame.
df['Date'] = pd.to_datetime(df['Date'], errors='coerce')
df['Date'] = df['Date'].dt.strftime('%Y-%m-%d')
# TF requires numeric label.
df['direction_numeric'] = df['direction'].apply(lambda x: {'DOWN': 0,
'STAY': 1,
'UP': 2}[x])
return df
df_stock = clean_data(df_stock_raw)
df_stock.head()
"""
Explanation: The function clean_data below does three things:
1. First, we'll remove any inf or NA values
2. Next, we parse the Date field to read it as a string.
3. Lastly, we convert the label direction into a numeric quantity, mapping 'DOWN' to 0, 'STAY' to 1 and 'UP' to 2.
End of explanation
"""
STOCK_HISTORY_COLUMN = 'close_values_prior_260'
COL_NAMES = ['day_' + str(day) for day in range(0, 260)]
LABEL = 'direction_numeric'
def _scale_features(df):
"""z-scale feature columns of Pandas dataframe.
Args:
        df: Pandas dataframe.
Returns:
Pandas dataframe with each column standardized according to the
values in that column.
"""
avg = df.mean()
std = df.std()
return (df - avg) / std
def create_features(df, label_name):
"""Create modeling features and label from Pandas dataframe.
Args:
df: Pandas dataframe.
label_name: str, the column name of the label.
Returns:
Pandas dataframe
"""
# Expand 1 column containing a list of close prices to 260 columns.
time_series_features = df[STOCK_HISTORY_COLUMN].apply(pd.Series)
# Rename columns.
time_series_features.columns = COL_NAMES
time_series_features = _scale_features(time_series_features)
# Concat time series features with static features and label.
label_column = df[LABEL]
return pd.concat([time_series_features,
label_column], axis=1)
df_features = create_features(df_stock, LABEL)
df_features.head()
"""
Explanation: Read data and preprocessing
Before we begin modeling, we'll preprocess our features by scaling to the z-score. This will ensure that the range of the feature values being fed to the model are comparable and should help with convergence during gradient descent.
End of explanation
"""
ix_to_plot = [0, 1, 9, 5]
fig, ax = plt.subplots(1, 1, figsize=(15, 8))
for ix in ix_to_plot:
label = df_features['direction_numeric'].iloc[ix]
example = df_features[COL_NAMES].iloc[ix]
ax = example.plot(label=label, ax=ax)
ax.set_ylabel('scaled price')
ax.set_xlabel('prior days')
ax.legend()
"""
Explanation: Let's plot a few examples and see that the preprocessing steps were implemented correctly.
End of explanation
"""
def _create_split(phase):
"""Create string to produce train/valid/test splits for a SQL query.
Args:
phase: str, either TRAIN, VALID, or TEST.
Returns:
String.
"""
floor, ceiling = '2002-11-01', '2010-07-01'
if phase == 'VALID':
floor, ceiling = '2010-07-01', '2011-09-01'
elif phase == 'TEST':
floor, ceiling = '2011-09-01', '2012-11-30'
return '''
WHERE Date >= '{0}'
AND Date < '{1}'
'''.format(floor, ceiling)
def create_query(phase):
"""Create SQL query to create train/valid/test splits on subsample.
Args:
phase: str, either TRAIN, VALID, or TEST.
Returns:
String.
"""
basequery = """
#standardSQL
SELECT
symbol,
Date,
direction,
close_values_prior_260
FROM
`stock_market.eps_percent_change_sp500`
"""
return basequery + _create_split(phase)
bq = bigquery.Client(project=PROJECT)
for phase in ['TRAIN', 'VALID', 'TEST']:
# 1. Create query string
query_string = create_query(phase)
# 2. Load results into DataFrame
df = bq.query(query_string).to_dataframe()
# 3. Clean, preprocess dataframe
df = clean_data(df)
df = create_features(df, label_name='direction_numeric')
# 3. Write DataFrame to CSV
if not os.path.exists('../data'):
os.mkdir('../data')
df.to_csv('../data/stock-{}.csv'.format(phase.lower()),
index_label=False, index=False)
print("Wrote {} lines to {}".format(
len(df),
'../data/stock-{}.csv'.format(phase.lower())))
ls -la ../data
"""
Explanation: Make train-eval-test split
Next, we'll make repeatable splits for our train/validation/test datasets and save these datasets to local csv files. The query below splits the dataset by date range to create a roughly 70-15-15 split for the train/validation/test sets.
End of explanation
"""
N_TIME_STEPS = 260
N_LABELS = 3
Xtrain = pd.read_csv('../data/stock-train.csv')
Xvalid = pd.read_csv('../data/stock-valid.csv')
ytrain = Xtrain.pop(LABEL)
yvalid = Xvalid.pop(LABEL)
ytrain_categorical = to_categorical(ytrain.values)
yvalid_categorical = to_categorical(yvalid.values)
"""
Explanation: Modeling
For experimentation purposes, we'll train various models using data we can fit in memory using the .csv files we created above.
End of explanation
"""
def plot_curves(train_data, val_data, label='Accuracy'):
"""Plot training and validation metrics on single axis.
Args:
        train_data: list, metrics obtained from training data.
val_data: list, metrics obtained from validation data.
label: str, title and label for plot.
Returns:
Matplotlib plot.
"""
plt.plot(np.arange(len(train_data)) + 0.5,
train_data,
"b.-", label="Training " + label)
plt.plot(np.arange(len(val_data)) + 1,
val_data, "r.-",
label="Validation " + label)
plt.gca().xaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True))
plt.legend(fontsize=14)
plt.xlabel("Epochs")
plt.ylabel(label)
plt.grid(True)
"""
Explanation: To monitor training progress and compare evaluation metrics for different models, we'll use the function below to plot metrics captured from the training job such as training and validation loss or accuracy.
End of explanation
"""
sum(yvalid == ytrain.value_counts().idxmax()) / yvalid.shape[0]
"""
Explanation: Baseline
Before we begin modeling in keras, let's create a benchmark using a simple heuristic. Let's see what kind of accuracy we would get on the validation set if we predict the majority class of the training set.
End of explanation
"""
# TODO 1a
model = Sequential()
model.add(Dense(units=N_LABELS,
activation='softmax',
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.compile(optimizer=Adam(lr=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=30,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
"""
Explanation: Ok. So just naively guessing the most common outcome UP will give about 29.5% accuracy on the validation set.
Linear model
We'll start with a simple linear model, mapping our sequential input to a single fully connected (dense) output layer.
End of explanation
"""
np.mean(history.history['val_accuracy'][-5:])
"""
Explanation: The accuracy seems to level out pretty quickly. To report the accuracy, we'll average the accuracy on the validation set across the last few epochs of training.
End of explanation
"""
# TODO 1b
dnn_hidden_units = [16, 8]
model = Sequential()
for layer in dnn_hidden_units:
model.add(Dense(units=layer,
activation="relu"))
model.add(Dense(units=N_LABELS,
activation="softmax",
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.compile(optimizer=Adam(lr=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=10,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
"""
Explanation: Deep Neural Network
The linear model is an improvement on our naive benchmark. Perhaps we can do better with a more complicated model. Next, we'll create a deep neural network with keras. We'll experiment with a two-layer DNN here, but feel free to try a more complex model or add any additional techniques to try to improve your performance.
End of explanation
"""
# TODO 1c
model = Sequential()
# Convolutional layer
model.add(Reshape(target_shape=[N_TIME_STEPS, 1]))
model.add(Conv1D(filters=5,
kernel_size=5,
strides=2,
padding="valid",
input_shape=[None, 1]))
model.add(MaxPool1D(pool_size=2,
strides=None,
padding='valid'))
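# Shape check (an illustrative aside): Conv1D(kernel_size=5, strides=2, "valid")
# over 260 time steps yields floor((260 - 5) / 2) + 1 = 128 steps, and
# MaxPool1D(pool_size=2) halves that to 64 before the flatten below.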
# Flatten the result and pass through DNN.
model.add(tf.keras.layers.Flatten())
model.add(Dense(units=N_TIME_STEPS//4,
activation="relu"))
model.add(Dense(units=N_LABELS,
activation="softmax",
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.compile(optimizer=Adam(lr=0.01),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=10,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
"""
Explanation: Convolutional Neural Network
The DNN does slightly better. Let's see how a convolutional neural network performs.
A 1-dimensional convolution can be useful for extracting features from sequential data or deriving features from shorter, fixed-length segments of the data set. Check out the documentation for how to implement a Conv1D in TensorFlow. Max pooling is a downsampling strategy commonly used in conjunction with convolutional neural networks. Next, we'll build a CNN model in keras using Conv1D to create convolution layers and MaxPool1D to perform max pooling before passing to a fully connected dense layer.
End of explanation
"""
# TODO 2a
model = Sequential()
# Reshape inputs to pass through RNN layer.
model.add(Reshape(target_shape=[N_TIME_STEPS, 1]))
model.add(LSTM(N_TIME_STEPS // 8,
activation='relu',
return_sequences=False))
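# return_sequences=False keeps only the final hidden state of the LSTM, which is
# exactly what the dense softmax layer below consumes.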
model.add(Dense(units=N_LABELS,
activation='softmax',
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
# Create the model.
model.compile(optimizer=Adam(lr=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=40,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
"""
Explanation: Recurrent Neural Network
RNNs are particularly well-suited for learning sequential data. They retain state information from one iteration to the next by feeding the output from one cell as input for the next step. In the cell below, we'll build an RNN model in keras. The final state of the RNN is captured and then passed through a fully connected layer to produce a prediction.
End of explanation
"""
# TODO 2b
rnn_hidden_units = [N_TIME_STEPS // 16,
N_TIME_STEPS // 32]
model = Sequential()
# Reshape inputs to pass through RNN layer.
model.add(Reshape(target_shape=[N_TIME_STEPS, 1]))
for layer in rnn_hidden_units[:-1]:
model.add(GRU(units=layer,
activation='relu',
return_sequences=True))
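# return_sequences=True makes each intermediate GRU emit its full output sequence,
# so the next recurrent layer receives one vector per time step; the final GRU
# below returns only its last state.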
model.add(GRU(units=rnn_hidden_units[-1],
return_sequences=False))
model.add(Dense(units=N_LABELS,
activation="softmax",
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.compile(optimizer=Adam(lr=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=50,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
"""
Explanation: Multi-layer RNN
Next, we'll build a multi-layer RNN. Just as multiple layers of a deep neural network allow for more complicated features to be learned during training, additional RNN layers can potentially learn complex features in sequential data. For a multi-layer RNN, the output of the first RNN layer is fed as the input into the next RNN layer.
End of explanation
"""
# TODO 3a
model = Sequential()
# Reshape inputs for convolutional layer
model.add(Reshape(target_shape=[N_TIME_STEPS, 1]))
model.add(Conv1D(filters=20,
kernel_size=4,
strides=2,
padding="valid",
input_shape=[None, 1]))
model.add(MaxPool1D(pool_size=2,
strides=None,
padding='valid'))
model.add(LSTM(units=N_TIME_STEPS//2,
return_sequences=False,
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.add(Dense(units=N_LABELS, activation="softmax"))
model.compile(optimizer=Adam(lr=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=30,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
"""
Explanation: Combining CNN and RNN architecture
Finally, we'll look at some model architectures which combine aspects of both convolutional and recurrent networks. For example, we can use a 1-dimensional convolution layer to process our sequences and create features which are then passed to an RNN model before prediction.
End of explanation
"""
# TODO 3b
rnn_hidden_units = [N_TIME_STEPS // 32,
N_TIME_STEPS // 64]
model = Sequential()
# Reshape inputs and pass through RNN layer.
model.add(Reshape(target_shape=[N_TIME_STEPS, 1]))
for layer in rnn_hidden_units:
model.add(LSTM(layer, return_sequences=True))
# Apply 1d convolution to RNN outputs.
model.add(Conv1D(filters=5,
kernel_size=3,
strides=2,
padding="valid"))
model.add(MaxPool1D(pool_size=4,
strides=None,
padding='valid'))
# Flatten the convolution output and pass through DNN.
model.add(tf.keras.layers.Flatten())
model.add(Dense(units=N_TIME_STEPS // 32,
activation="relu",
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.add(Dense(units=N_LABELS,
activation="softmax",
kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
model.compile(optimizer=Adam(lr=0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
y=ytrain_categorical,
batch_size=Xtrain.shape[0],
validation_data=(Xvalid.values, yvalid_categorical),
epochs=80,
verbose=0)
plot_curves(history.history['loss'],
history.history['val_loss'],
label='Loss')
plot_curves(history.history['accuracy'],
history.history['val_accuracy'],
label='Accuracy')
np.mean(history.history['val_accuracy'][-5:])
"""
Explanation: We can also try building a hybrid model which uses a 1-dimensional CNN to create features from the outputs of an RNN.
End of explanation
"""
|
ThunderShiviah/code_guild | interactive-coding-challenges/graphs_trees/tree_height/height_challenge.ipynb | mit | %run ../bst/bst.py
%load ../bst/bst.py
def height(node):
# TODO: Implement me
pass
"""
Explanation: <small><i>This notebook was prepared by Donne Martin. Source and license info is on GitHub.</i></small>
Challenge Notebook
Problem: Determine the height of a tree.
Constraints
Test Cases
Algorithm
Code
Unit Test
Solution Notebook
Constraints
Is this a binary tree?
Yes
Can we assume we already have a Node class with an insert method?
Yes
Test Cases
5 -> 1
5, 2, 8, 1, 3 -> 3
Algorithm
Refer to the Solution Notebook. If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
Code
End of explanation
"""
# %load test_height.py
from nose.tools import assert_equal
class TestHeight(object):
def test_height(self):
root = Node(5)
assert_equal(height(root), 1)
insert(root, 2)
insert(root, 8)
insert(root, 1)
insert(root, 3)
assert_equal(height(root), 3)
print('Success: test_height')
def main():
test = TestHeight()
test.test_height()
if __name__ == '__main__':
main()
"""
Explanation: Unit Test
The following unit test is expected to fail until you solve the challenge.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.24/_downloads/568aae18ec92d284aff29cfb5f3c11e7/resolution_metrics.ipynb | bsd-3-clause | # Author: Olaf Hauk <[email protected]>
#
# License: BSD-3-Clause
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_resolution_matrix
from mne.minimum_norm import resolution_metrics
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects/'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
fname_evo = data_path + '/MEG/sample/sample_audvis-ave.fif'
# read forward solution
forward = mne.read_forward_solution(fname_fwd)
# forward operator with fixed source orientations
mne.convert_forward_solution(forward, surf_ori=True,
force_fixed=True, copy=False)
# noise covariance matrix
noise_cov = mne.read_cov(fname_cov)
# evoked data for info
evoked = mne.read_evokeds(fname_evo, 0)
# make inverse operator from forward solution
# free source orientation
inverse_operator = mne.minimum_norm.make_inverse_operator(
info=evoked.info, forward=forward, noise_cov=noise_cov, loose=0.,
depth=None)
# regularisation parameter
snr = 3.0
lambda2 = 1.0 / snr ** 2
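# lambda2 = 1 / SNR**2 is the standard amplitude-SNR-based regularisation used by
# MNE-Python's minimum-norm solvers; an SNR of 3 is the usual default for evoked data.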
"""
Explanation: Compute spatial resolution metrics in source space
Compute peak localisation error and spatial deviation for the point-spread
functions of dSPM and MNE. Plot their distributions and difference of
distributions. This example mimics some results from :footcite:HaukEtAl2019,
namely Figure 3 (peak localisation error for PSFs, L2-MNE vs dSPM) and Figure 4
(spatial deviation for PSFs, L2-MNE vs dSPM).
End of explanation
"""
rm_mne = make_inverse_resolution_matrix(forward, inverse_operator,
method='MNE', lambda2=lambda2)
ple_mne_psf = resolution_metrics(rm_mne, inverse_operator['src'],
function='psf', metric='peak_err')
sd_mne_psf = resolution_metrics(rm_mne, inverse_operator['src'],
function='psf', metric='sd_ext')
del rm_mne
"""
Explanation: MNE
Compute resolution matrices, peak localisation error (PLE) for point spread
functions (PSFs), spatial deviation (SD) for PSFs:
End of explanation
"""
rm_dspm = make_inverse_resolution_matrix(forward, inverse_operator,
method='dSPM', lambda2=lambda2)
ple_dspm_psf = resolution_metrics(rm_dspm, inverse_operator['src'],
function='psf', metric='peak_err')
sd_dspm_psf = resolution_metrics(rm_dspm, inverse_operator['src'],
function='psf', metric='sd_ext')
del rm_dspm, forward
"""
Explanation: dSPM
Do the same for dSPM:
End of explanation
"""
brain_ple_mne = ple_mne_psf.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=1,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_ple_mne.add_text(0.1, 0.9, 'PLE MNE', 'title', font_size=16)
"""
Explanation: Visualize results
Visualise peak localisation error (PLE) across the whole cortex for MNE PSF:
End of explanation
"""
brain_ple_dspm = ple_dspm_psf.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=2,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_ple_dspm.add_text(0.1, 0.9, 'PLE dSPM', 'title', font_size=16)
"""
Explanation: And dSPM:
End of explanation
"""
diff_ple = ple_mne_psf - ple_dspm_psf
brain_ple_diff = diff_ple.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=3,
clim=dict(kind='value', pos_lims=(0., 1., 2.)))
brain_ple_diff.add_text(0.1, 0.9, 'PLE MNE-dSPM', 'title', font_size=16)
"""
Explanation: Subtract the two distributions and plot this difference
End of explanation
"""
brain_sd_mne = sd_mne_psf.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=4,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_sd_mne.add_text(0.1, 0.9, 'SD MNE', 'title', font_size=16)
"""
Explanation: These plots show that dSPM has generally lower peak localization error (red
color) than MNE in deeper brain areas, but higher error (blue color) in more
superficial areas.
Next we'll visualise spatial deviation (SD) across the whole cortex for MNE
PSF:
End of explanation
"""
brain_sd_dspm = sd_dspm_psf.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=5,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_sd_dspm.add_text(0.1, 0.9, 'SD dSPM', 'title', font_size=16)
"""
Explanation: And dSPM:
End of explanation
"""
diff_sd = sd_mne_psf - sd_dspm_psf
brain_sd_diff = diff_sd.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=6,
clim=dict(kind='value', pos_lims=(0., 1., 2.)))
brain_sd_diff.add_text(0.1, 0.9, 'SD MNE-dSPM', 'title', font_size=16)
"""
Explanation: Subtract the two distributions and plot this difference:
End of explanation
"""
|
pydata/xarray | doc/examples/visualization_gallery.ipynb | apache-2.0 | import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import xarray as xr
%matplotlib inline
"""
Explanation: Visualization Gallery
This notebook shows common visualization issues encountered in xarray.
End of explanation
"""
ds = xr.tutorial.load_dataset("air_temperature")
"""
Explanation: Load example dataset:
End of explanation
"""
air = ds.air.isel(time=[0, 724]) - 273.15
# This is the map projection we want to plot *onto*
map_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)
p = air.plot(
transform=ccrs.PlateCarree(), # the data's projection
col="time",
col_wrap=1, # multiplot settings
aspect=ds.dims["lon"] / ds.dims["lat"], # for a sensible figsize
subplot_kws={"projection": map_proj},
) # the plot's projection
# We have to set the map's options on all axes
for ax in p.axes.flat:
ax.coastlines()
ax.set_extent([-160, -30, 5, 75])
"""
Explanation: Multiple plots and map projections
Control the map projection parameters on multiple axes
This example illustrates how to plot multiple maps and control their extent
and aspect ratio.
For more details see this discussion on github.
End of explanation
"""
air = ds.air.isel(time=0)
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 6))
# The first plot (in kelvins) chooses "viridis" and uses the data's min/max
air.plot(ax=ax1, cbar_kwargs={"label": "K"})
ax1.set_title("Kelvins: default")
ax2.set_xlabel("")
# The second plot (in celsius) now chooses "BuRd" and centers min/max around 0
airc = air - 273.15
airc.plot(ax=ax2, cbar_kwargs={"label": "°C"})
ax2.set_title("Celsius: default")
ax2.set_xlabel("")
ax2.set_ylabel("")
# The center doesn't have to be 0
air.plot(ax=ax3, center=273.15, cbar_kwargs={"label": "K"})
ax3.set_title("Kelvins: center=273.15")
# Or it can be ignored
airc.plot(ax=ax4, center=False, cbar_kwargs={"label": "°C"})
ax4.set_title("Celsius: center=False")
ax4.set_ylabel("")
# Make it nice
plt.tight_layout()
"""
Explanation: Centered colormaps
Xarray's automatic colormaps choice
End of explanation
"""
air2d = ds.air.isel(time=500)
# Prepare the figure
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4))
# Irregular levels to illustrate the use of a proportional colorbar
levels = [245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 310, 340]
# Plot data
air2d.plot(ax=ax1, levels=levels)
air2d.plot(ax=ax2, levels=levels, cbar_kwargs={"ticks": levels})
air2d.plot(
ax=ax3, levels=levels, cbar_kwargs={"ticks": levels, "spacing": "proportional"}
)
# Show plots
plt.tight_layout()
"""
Explanation: Control the plot's colorbar
Use cbar_kwargs keyword to specify the number of ticks.
The spacing kwarg can be used to draw proportional ticks.
End of explanation
"""
air = ds.air - 273.15 # to celsius
# Prepare the figure
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)
# Selected latitude indices
isel_lats = [10, 15, 20]
# Temperature vs longitude plot - illustrates the "hue" kwarg
air.isel(time=0, lat=isel_lats).plot.line(ax=ax1, hue="lat")
ax1.set_ylabel("°C")
# Temperature vs time plot - illustrates the "x" and "add_legend" kwargs
air.isel(lon=30, lat=isel_lats).plot.line(ax=ax2, x="time", add_legend=False)
ax2.set_ylabel("")
# Show
plt.tight_layout()
"""
Explanation: Multiple lines from a 2d DataArray
Use xarray.plot.line on a 2d DataArray to plot selections as
multiple lines.
See plotting.multiplelines for more details.
End of explanation
"""
da = xr.tutorial.open_rasterio("RGB.byte")
# The data is in UTM projection. We have to set it manually until
# https://github.com/SciTools/cartopy/issues/813 is implemented
crs = ccrs.UTM("18")
# Plot on a map
ax = plt.subplot(projection=crs)
da.plot.imshow(ax=ax, rgb="band", transform=crs)
ax.coastlines("10m", color="r")
"""
Explanation: imshow() and rasterio map projections
Using rasterio's projection information for more accurate plots.
This example extends recipes.rasterio and plots the image in the
original map projection instead of relying on pcolormesh and a map
transformation.
End of explanation
"""
from pyproj import Transformer
import numpy as np
da = xr.tutorial.open_rasterio("RGB.byte")
x, y = np.meshgrid(da["x"], da["y"])
transformer = Transformer.from_crs(da.crs, "EPSG:4326", always_xy=True)
lon, lat = transformer.transform(x, y)
da.coords["lon"] = (("y", "x"), lon)
da.coords["lat"] = (("y", "x"), lat)
# Compute a greyscale out of the rgb image
greyscale = da.mean(dim="band")
# Plot on a map
ax = plt.subplot(projection=ccrs.PlateCarree())
greyscale.plot(
ax=ax,
x="lon",
y="lat",
transform=ccrs.PlateCarree(),
cmap="Greys_r",
shading="auto",
add_colorbar=False,
)
ax.coastlines("10m", color="r")
"""
Explanation: Parsing rasterio geocoordinates
Converting a projection's cartesian coordinates into 2D longitudes and
latitudes.
These new coordinates might be handy for plotting and indexing, but it should
be kept in mind that a grid which is regular in projection coordinates will
likely be irregular in lon/lat. It is often recommended to work in the data's
original map projection (see recipes.rasterio_rgb).
End of explanation
"""
|
ffmmjj/intro_to_data_science_workshop | solutions/.ipynb_checkpoints/Boston housing prices prediction-checkpoint.ipynb | apache-2.0 | # Make sure you have a working installation of pandas by executing this cell
import pandas as pd
"""
Explanation: Regression problems involve the prediction of a continuous, numeric value from a set of characteristics.
In this example, we'll build a model to predict house prices from characteristics like the number of rooms and the crime rate at the house location.
Reading data
We'll be using the pandas package to read data.
Pandas is an open source library that can be used to read formatted data files into tabular structures that can be processed by python scripts.
End of explanation
"""
boston_housing_data = pd.read_csv('../datasets/boston.csv')
"""
Explanation: In this exercise, we'll use the Boston Housing dataset to predict house prices from characteristics like the number of rooms and distance to employment centers.
End of explanation
"""
boston_housing_data.head()
boston_housing_data.info()
boston_housing_data.describe()
"""
Explanation: Pandas allows reading our data from different file formats and sources. See this link for a list of supported operations.
End of explanation
"""
datasets = pd.read_csv('../datasets/anscombe.csv')
for i in range(1, 5):
dataset = datasets[datasets.Source == i]
print('Dataset {} (X, Y) mean: {}'.format(i, (dataset.x.mean(), dataset.y.mean())))
print('\n')
for i in range(1, 5):
dataset = datasets[datasets.Source == i]
print('Dataset {} (X, Y) std deviation: {}'.format(i, (dataset.x.std(), dataset.y.std())))
print('\n')
for i in range(1, 5):
dataset = datasets[datasets.Source == i]
print('Dataset {} correlation between X and Y: {}'.format(i, dataset.x.corr(dataset.y)))
"""
Explanation: Visualizing data
After reading our data into a pandas DataFrame and getting a broader view of the dataset, we can build charts to visualize the "shape" of the data.
We'll use python's Matplotlib library to create these charts.
An example
Suppose you're given the following information about four datasets:
End of explanation
"""
import matplotlib.pyplot as plt
# This line makes the graphs appear as cell outputs rather than in a separate window or file.
%matplotlib inline
# Extract the house prices and average number of rooms to two separate variables
prices = boston_housing_data.medv
rooms = boston_housing_data.rm
# Create a scatterplot of these two properties using plt.scatter()
plt.scatter(rooms, prices)
# Specify labels for the X and Y axis
plt.xlabel('Number of rooms')
plt.ylabel('House price')
# Show graph
plt.show()
# Extract the house prices and average number of rooms to two separate variables
prices = boston_housing_data.medv
nox = boston_housing_data.nox
# Create a scatterplot of these two properties using plt.scatter()
plt.scatter(nox, prices)
# Specify labels for the X and Y axis
plt.xlabel('Nitric oxide concentration')
plt.ylabel('House price')
# Show graph
plt.show()
"""
Explanation: They all have roughly the same mean, standard deviations and correlation. How similar are they?
This dataset is known as the Anscombe's Quartet and it's used to illustrate how tricky it can be to trust only summary statistics to characterize a dataset.
End of explanation
"""
from sklearn.linear_model import LinearRegression
x = boston_housing_data.rm.values.reshape(-1, 1)
y = boston_housing_data.medv.values.reshape(-1, 1)
lr = LinearRegression().fit(x, y)
lr.predict([[6]])  # predict() expects a 2D input: one sample with one feature
"""
Explanation: Predicting house prices
We could see in the previous graphs that some features have a roughly linear relationship to the house prices. We'll use Scikit-Learn's LinearRegression to model this data and predict house prices from other information.
The example below builds a LinearRegression model using the average number of rooms to predict house prices:
End of explanation
"""
X = boston_housing_data.drop('medv', axis=1)
t = boston_housing_data.medv.values.reshape(-1, 1)
# Use sklearn's train_test_split() method to split our data into two sets.
# See http://scikit-learn.org/0.17/modules/generated/sklearn.cross_validation.train_test_split.html#sklearn.cross_validation.train_test_split
from sklearn.cross_validation import train_test_split
Xtr, Xts, ytr, yts = train_test_split(X, t)
# Use the training set to build a LinearRegression model
lr = LinearRegression().fit(Xtr, ytr)
# Use the validation set to assess the model's performance.
# See http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html
from sklearn.metrics import mean_squared_error
mean_squared_error(yts, lr.predict(Xts))
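# Optional sanity check (a sketch): taking the square root puts the error back in
# the same units as the house prices, which makes the number easier to interpret.
import numpy as np

rmse = np.sqrt(mean_squared_error(yts, lr.predict(Xts)))
print('Validation RMSE: {:.2f} (in the same units as medv)'.format(rmse))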
"""
Explanation: We'll now use all the features in the dataset to predict house prices.
Let's start by splitting our data into a training set and a validation set. The training set will be used to train our linear model; the validation set, on the other hand, will be used to assess how accurate our model is.
End of explanation
"""
|
metpy/MetPy | v0.9/_downloads/0fad3c70b425eaed875fe7cd5ea738b8/Advanced_Sounding.ipynb | bsd-3-clause | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, SkewT
from metpy.units import units
"""
Explanation: Advanced Sounding
Plot a sounding using MetPy with more advanced features.
Beyond just plotting data, this uses calculations from metpy.calc to find the lifted
condensation level (LCL) and the profile of a surface-based parcel. The area between the
ambient profile and the parcel profile is colored as well.
End of explanation
"""
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('may4_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
"""
Explanation: Upper air data can be obtained using the siphon package, but for this example we will use
some of MetPy's sample data.
End of explanation
"""
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
"""
Explanation: We will pull the data out of the example dataset into individual variables and
assign units.
End of explanation
"""
fig = plt.figure(figsize=(9, 9))
add_metpy_logo(fig, 115, 100)
skew = SkewT(fig, rotation=45)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Calculate LCL height and plot as black dot
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Calculate full parcel profile and add to plot as black line
prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
skew.plot(p, prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, prof)
skew.shade_cape(p, T, prof)
# An example of a slanted line at constant T -- in this case the 0
# isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Show the plot
plt.show()
"""
Explanation: Create a new figure. The dimensions here give a good aspect ratio.
End of explanation
"""
|
flyflyjean/python-ay250-homeworks | hw_2/hw_2_assignment.ipynb | mit | %matplotlib inline
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
img=mpimg.imread('problem0.png')
plt.imshow(img)
"""
Explanation: problem 0
End of explanation
"""
%matplotlib inline
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
img=mpimg.imread('jd.png')
plt.imshow(img)
"""
Explanation: On the left it shows two RNA fragments with different lengths. The figure tries to compare how the two RNAs induce the formation of a protein capsid under different concentrations. It did well in labeling, making the reader clearly know how to match the data with the object. However, I feel it includes too much information without sufficient clarification in the figure. For example, it has three x-axes, but it is confusing whether the concentration and time are independent or correlated. Also, the concentration tick labels are different, but it uses the orange dashed line to align them, and it is unclear what these grid lines are drawn for. I think it could be broken down into two figures to resolve this confusion. There was a gap in the concentration of the black line, which I do not think is necessary.
problem 1
End of explanation
"""
%matplotlib notebook
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import pandas as pd
dt = [('x', int), ('y', float), ('z', float)]
position = pd.read_excel('C343_Et_ipr.xls', header = None, names = ['x', 'y', 'z'], skiprows = 1)
fig = plt.figure(figsize = (10,7))
axx = fig.add_subplot(111, projection='3d')
scat = axx.scatter(position['x'], position['y'], position['z'], zdir = 'z', alpha = 0.1, c = position.index)
fig.colorbar(scat)
axx.set_xlabel('X-coordinate', fontsize = 15)
axx.set_ylabel('Y-coordinate', fontsize = 15)
axx.set_zlabel('Z-coordinate', fontsize = 15)
"""
Explanation: The original plot shows how one atom moves relative to another atom at the origin with respect to time. The trajectory ends up in a tight cluster, showing that after a certain period of time the atom reaches an equilibrated state. I tried to improve this plot by changing the path plot to a scatter plot and reducing the transparency of the dots. Consequently, the darker an area is, the higher the probability that the atom stays in that area. Also, I colormapped the dots with respect to time, so it directly shows that at the very beginning the movement of the atom is very random, and once it reaches a favorable state it just fluctuates around that state. The matplotlib notebook backend makes the plot interactive, and I can easily rotate the plot to get a better understanding of the spatial distribution.
End of explanation
"""
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
dt1 = [('date', int), ('value', float)]
dt2 = [('date', int), ('temp', float)]
google = np.loadtxt('google_data.txt', dt1, skiprows = 1)
yahoo = np.loadtxt('yahoo_data.txt', dt1, skiprows = 1)
ny = np.loadtxt('ny_temps.txt', dt2, skiprows = 1)
fig, ax0 = plt.subplots(1, 1, figsize = (12,8))
ax0.set_title('New York Temperature, Google and Yahoo!', fontsize = 30, fontname = 'Times New Roman')
ax0.title.set_y(1.03)
#ax0.title.set_fontsize(28)
lns1 = ax0.plot(yahoo['date'], yahoo['value'], color = 'purple', label = 'Yahoo! Stock Value')
lns2 = ax0.plot(google['date'], google['value'], color = 'blue', label = 'Google Stock Value')
ax0.set_xlabel('Date(MJD)', fontsize = 15, fontname = 'Arial')
ax0.set_ylabel('Value(Dollars)', fontsize = 15, fontname = 'Arial')
ax0.spines['top'].set_linewidth(2)
ax0.spines['bottom'].set_linewidth(2)
ax0.spines['left'].set_linewidth(2)
ax0.spines['right'].set_linewidth(2)
ax0.minorticks_on()
ax0.tick_params(axis='x', which='major', labelsize=10, length = 8, width = 2, top = 'off')
ax0.tick_params(axis='x', which='minor', length = 4, top = 'off')
ax0.tick_params(axis='y', which='major', labelsize=10, length = 8, width = 2, right = 'off')
ax0.tick_params(axis='y', which='minor', length = 4, right = 'off')
ax0.set_ylabel('Value(Dollars)', fontsize = 15, fontname = 'Arial')
ax1 = ax0.twinx()
lns3 = ax1.plot(ny['date'], ny['temp'], ls = 'dashed', color = 'red', label = 'NY Mon. High Temp')
ax1.set_ylim(-150,100)
ax1.set_ylabel('Temperature($^\circ$F)', fontsize = 15, fontname = 'Arial')
ax1.minorticks_on()
ax1.tick_params(axis='y', which='major', labelsize=10, length = 8, width = 2, left = 'off')
ax1.tick_params(axis='y', which='minor', length = 4, left = 'off')
lns = lns1+lns2+lns3
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, loc=(0.07, 0.5), frameon = False, handlelength = 5)
"""
Explanation: problem 2
End of explanation
"""
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import pandas as pd
class drawrect:
def __init__(self, fig, axarr, data):
self.fig = fig
self.axarr = axarr
self.data = data
self.length = len(data.columns) - 1
self.ax = None
self.rect = None
self.x0 = None
self.y0 = None
self.x1 = None
self.y1 = None
self.fig.canvas.mpl_connect('button_press_event', self.on_press)
self.fig.canvas.mpl_connect('button_release_event', self.on_release)
self.fig.canvas.mpl_connect('key_press_event', self.on_keyd)
dmax = []
dmin = []
for i in data.columns:
dmax.append(np.max(data[i]))
dmin.append(np.min(data[i]))
for i in range(self.length):
axarr[0, i].set_xlim(dmin[3-i], dmax[3-i])
axarr[i, 0].set_ylim(dmin[i], dmax[i])
# Colorarr was generated to record the color of each datapoint
self.colorarr = []
self.colordict = {'setosa':'red', 'versicolor':'green', 'virginica':'blue' }
for i in data[data.columns[-1]]:
self.colorarr.append(self.colordict[i])
# Seld.findaxes is a dictionary which corelates the axes and its x, y axis information
self.findaxes = {}
for y_subplot in range(self.length):
for x_subplot in range(self.length):
self.findaxes.update({
str(axarr[y_subplot, x_subplot]):[data.columns[3 - x_subplot], data.columns[y_subplot]]
})
# Plot the input data
self.scatlist = []
for i in range(self.length):
for j in range(self.length):
scat = self.axarr[i,j].scatter(
self.data[self.findaxes[str(self.axarr[i,j])][0]], self.data[self.findaxes[str(self.axarr[i,j])][1]],
color = self.colorarr, alpha = 0.3
)
self.scatlist.append(scat)
self.axarr[i, j].tick_params(axis='both', which='major', labelsize=8)
self.scatarr = np.asarray(self.scatlist).reshape(self.length, self.length)
self.fig.canvas.draw()
def on_press(self, event):
if not self.x0:
self.x0 = event.xdata
self.y0 = event.ydata
self.ax = event.inaxes
def on_release(self, event):
if not self.rect:
self.x1 = event.xdata
self.y1 = event.ydata
self.rect = Rectangle((self.x0, self.y0), self.x1 - self.x0, self.y1 - self.y0, color = 'orange', alpha = 0.3)
self.ax.add_patch(self.rect)
self.update_color(self.data)
def on_keyd(self, event):
if event.key == 'd':
self.ax.patches.pop()
self.fig.canvas.draw()
self.ax = None
self.rect = None
self.x0 = None
self.y0 = None
self.x1 = None
self.y1 = None
self.update_plot(self.colorarr)
def update_color(self, data):
if self.rect:
new_colorarr = np.copy(self.colorarr)
xmin = min(self.x0, self.x1)
xmax = max(self.x0, self.x1)
ymin = min(self.y0, self.y1)
ymax = max(self.y0, self.y1)
x_now = self.findaxes[str(self.ax)][0]
y_now = self.findaxes[str(self.ax)][1]
for n in range(len(data[x_now])):
if not ((xmin <= data[x_now][n] <= xmax) and (ymin <= data[y_now][n] <= ymax)):
new_colorarr[n] = 'gray'
self.update_plot(new_colorarr)
def update_plot(self, color_current):
for i in range(self.length):
for j in range(self.length):
self.scatarr[i][j].set_color(color_current)
self.fig.canvas.draw()
fig1, axarr1 = plt.subplots(4, 4, sharex="col", sharey="row", figsize=(10, 10))
data1 = pd.read_csv('flowers.csv')
a = drawrect(fig1, axarr1, data1)
plt.show()
"""
Explanation: problem 3
End of explanation
"""
|
Kaggle/learntools | notebooks/data_cleaning/raw/tut4.ipynb | apache-2.0 | # modules we'll use
import pandas as pd
import numpy as np
# helpful character encoding module
import chardet
# set seed for reproducibility
np.random.seed(0)
"""
Explanation: In this notebook, we're going to be working with different character encodings.
Let's get started!
Get our environment set up
The first thing we'll need to do is load in the libraries we'll be using. Not our dataset, though: we'll get to it later!
End of explanation
"""
# start with a string
before = "This is the euro symbol: €"
# check to see what datatype it is
type(before)
"""
Explanation: What are encodings?
Character encodings are specific sets of rules for mapping from raw binary byte strings (that look like this: 0110100001101001) to characters that make up human-readable text (like "hi"). There are many different encodings, and if you tried to read in text with a different encoding than the one it was originally written in, you ended up with scrambled text called "mojibake" (said like mo-gee-bah-kay). Here's an example of mojibake:
æ–‡å—化ã??
You might also end up with "unknown" characters. These are what get printed when there's no mapping between a particular byte and a character in the encoding you're using to read your byte string, and they look like this:
����������
Character encoding mismatches are less common today than they used to be, but it's definitely still a problem. There are lots of different character encodings, but the main one you need to know is UTF-8.
UTF-8 is the standard text encoding. All Python code is in UTF-8 and, ideally, all your data should be as well. It's when things aren't in UTF-8 that you run into trouble.
It was pretty hard to deal with encodings in Python 2, but thankfully in Python 3 it's a lot simpler. (Kaggle Notebooks only use Python 3.) There are two main data types you'll encounter when working with text in Python 3. One is the string, which is what text is by default.
End of explanation
"""
# encode it to a different encoding, replacing characters that raise errors
after = before.encode("utf-8", errors="replace")
# check the type
type(after)
"""
Explanation: The other data type is bytes, which is a sequence of integers. You can convert a string into bytes by specifying which encoding it's in:
End of explanation
"""
# take a look at what the bytes look like
after
"""
Explanation: If you look at a bytes object, you'll see that it has a b in front of it, and then maybe some text after. That's because bytes are printed out as if they were characters encoded in ASCII. (ASCII is an older character encoding that doesn't really work for writing any language other than English.) Here you can see that our euro symbol has been replaced with some mojibake that looks like "\xe2\x82\xac" when it's printed as if it were an ASCII string.
End of explanation
"""
# convert it back to utf-8
print(after.decode("utf-8"))
"""
Explanation: When we convert our bytes back to a string with the correct encoding, we can see that our text is all there correctly, which is great! :)
End of explanation
"""
# try to decode our bytes with the ascii encoding
print(after.decode("ascii"))
"""
Explanation: However, when we try to use a different encoding to map our bytes into a string, we get an error. This is because the encoding we're trying to use doesn't know what to do with the bytes we're trying to pass it. You need to tell Python the encoding that the byte string is actually supposed to be in.
You can think of different encodings as different ways of recording music. You can record the same music on a CD, cassette tape or 8-track. While the music may sound more-or-less the same, you need to use the right equipment to play the music from each recording format. The correct decoder is like a cassette player or a CD player. If you try to play a cassette in a CD player, it just won't work.
End of explanation
"""
# start with a string
before = "This is the euro symbol: €"
# encode it to a different encoding, replacing characters that raise errors
after = before.encode("ascii", errors = "replace")
# convert it back to utf-8
print(after.decode("ascii"))
# We've lost the original underlying byte string! It's been
# replaced with the underlying byte string for the unknown character :(
"""
Explanation: We can also run into trouble if we try to use the wrong encoding to map from a string to bytes. Like I said earlier, strings are UTF-8 by default in Python 3, so if we try to treat them like they were in another encoding we'll create problems.
For example, if we try to convert a string to bytes for ASCII using encode(), we can ask for the bytes to be what they would be if the text was in ASCII. Since our text isn't in ASCII, though, there will be some characters it can't handle. We can automatically replace the characters that ASCII can't handle. If we do that, however, any characters not in ASCII will just be replaced with the unknown character. Then, when we convert the bytes back to a string, the character will be replaced with the unknown character. The dangerous part about this is that there's no way to tell which character it should have been. That means we may have just made our data unusable!
End of explanation
"""
# try to read in a file not in UTF-8
kickstarter_2016 = pd.read_csv("../input/kickstarter-projects/ks-projects-201612.csv")
"""
Explanation: This is bad and we want to avoid doing it! It's far better to convert all our text to UTF-8 as soon as we can and keep it in that encoding. The best time to convert non UTF-8 input into UTF-8 is when you read in files, which we'll talk about next.
Reading in files with encoding problems
Most files you'll encounter will probably be encoded with UTF-8. This is what Python expects by default, so most of the time you won't run into problems. However, sometimes you'll get an error like this:
End of explanation
"""
# look at the first ten thousand bytes to guess the character encoding
with open("../input/kickstarter-projects/ks-projects-201801.csv", 'rb') as rawdata:
result = chardet.detect(rawdata.read(10000))
# check what the character encoding might be
print(result)
"""
Explanation: Notice that we get the same UnicodeDecodeError we got when we tried to decode UTF-8 bytes as if they were ASCII! This tells us that this file isn't actually UTF-8. We don't know what encoding it actually is though. One way to figure it out is to try and test a bunch of different character encodings and see if any of them work. A better way, though, is to use the chardet module to try and automatically guess what the right encoding is. It's not 100% guaranteed to be right, but it's usually faster than just trying to guess.
I'm going to just look at the first ten thousand bytes of this file. This is usually enough for a good guess about what the encoding is and is much faster than trying to look at the whole file. (Especially with a large file this can be very slow.) Another reason to just look at the first part of the file is that we can see by looking at the error message that the first problem is the 11th character. So we probably only need to look at the first little bit of the file to figure out what's going on.
End of explanation
"""
# read in the file with the encoding detected by chardet
kickstarter_2016 = pd.read_csv("../input/kickstarter-projects/ks-projects-201612.csv", encoding='Windows-1252')
# look at the first few lines
kickstarter_2016.head()
"""
Explanation: So chardet is 73% confident that the right encoding is "Windows-1252". Let's see if that's correct:
End of explanation
"""
# save our file (will be saved as UTF-8 by default!)
kickstarter_2016.to_csv("ks-projects-201801-utf8.csv")
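# A quick check (sketch): reading the saved file back without specifying an
# encoding should work, since it was written as UTF-8. The variable name here is
# just for illustration.
kickstarter_utf8 = pd.read_csv("ks-projects-201801-utf8.csv")
kickstarter_utf8.head()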
"""
Explanation: Yep, looks like chardet was right! The file reads in with no problem (although we do get a warning about datatypes) and when we look at the first few rows it seems to be fine.
What if the encoding chardet guesses isn't right? Since chardet is basically just a fancy guesser, sometimes it will guess the wrong encoding. One thing you can try is looking at more or less of the file and seeing if you get a different result and then try that.
Saving your files with UTF-8 encoding
Finally, once you've gone through all the trouble of getting your file into UTF-8, you'll probably want to keep it that way. The easiest way to do that is to save your files with UTF-8 encoding. The good news is, since UTF-8 is the standard encoding in Python, when you save a file it will be saved as UTF-8 by default:
End of explanation
"""
|
IACS-CS-207/cs207-F17 | lectures/L9/L9.ipynb | mit | from IPython.display import HTML
"""
Explanation: Lecture 9
Object Oriented Programming
Monday, October 2nd 2017
End of explanation
"""
def Complex(a, b): # constructor
return (a,b)
def real(c): # method
return c[0]
def imag(c):
return c[1]
def str_complex(c):
return "{0}+{1}i".format(c[0], c[1])
c1 = Complex(1,2) # constructor
print(real(c1), " ", str_complex(c1))
"""
Explanation: Motivation
We would like to find a way to represent complex, structured data in the context of our programming language.
For example, to represent a location, we might want to associate a name, a latitude and a longitude with it.
Thus we would want to create a compound data type which carries this information.
In C, for example, this is a struct:
C
struct location {
float longitude;
float latitude;
}
REMEMBER: A language has 3 parts:
expressions and statements: how to structure simple computations
means of combination: how to structure complex computations
means of abstraction: how to build complex units
Review
When we write a function, we give it some sensible name which can then be used by a "client" programmer. We don't care about how this function is implemented. We just want to know its signature (API) and use it.
In a similar way, we want to encapsulate our data: we don't want to know how it is stored and all that. We just want to be able to use it. This is one of the key ideas behind object oriented programming.
To do this, write constructors that make objects. We also write other functions that access or change data on the object. These functions are called the "methods" of the object, and are what the client programmer uses.
First Examples
Objects thru tuples: An object for complex numbers
How might we implement such objects? First, let's think of tuples.
End of explanation
"""
c1[0]
"""
Explanation: But things aren't hidden so I can get through the interface:
End of explanation
"""
c1[0]=2
"""
Explanation: Because I used a tuple, and a tuple is immutable, I can't change this complex number once it's created.
End of explanation
"""
def Complex2(a, b): # constructor
def dispatch(message): # capture a and b at constructor-run time
if message=="real":
return a
elif message=='imag':
return b
elif message=="str":
return "{0}+{1}i".format(a, b)
return dispatch
z=Complex2(1,2)
print(z("real"), " ", z("imag"), " ", z("str"))
"""
Explanation: Objects thru closures
Let's try an implementation that uses a closure to capture the value of arguments.
End of explanation
"""
def Complex3(a, b):
in_a=a
in_b=b
def dispatch(message, value=None):
nonlocal in_a, in_b
if message=='set_real' and value != None:
in_a = value
elif message=='set_imag' and value != None:
in_b = value
elif message=="real":
return in_a
elif message=='imag':
return in_b
elif message=="str":
return "{0}+{1}i".format(in_a, in_b)
return dispatch
c3=Complex3(1,2)
print(c3("real"), " ", c3("imag"), " ", c3("str"))
c3('set_real', 2)
print(c3("real"), " ", c3("imag"), " ", c3("str"))
"""
Explanation: This looks pretty good so far.
The only problem is that we don't have a way to change the real and imaginary parts.
For this, we need to add things called setters.
Objects with Setters
End of explanation
"""
class ComplexClass():
def __init__(self, a, b):
self.real = a
self.imaginary = b
"""
Explanation: Python Classes and instance variables
We constructed an object system above. But Python comes with its own.
Classes allow us to define our own types in the Python type system.
End of explanation
"""
HTML('<iframe width="800" height="500" frameborder="0" src="http://pythontutor.com/iframe-embed.html#code=class%20ComplexClass%28%29%3A%0A%20%20%20%20%0A%20%20%20%20def%20__init__%28self,%20a,%20b%29%3A%0A%20%20%20%20%20%20%20%20self.real%20%3D%20a%0A%20%20%20%20%20%20%20%20self.imaginary%20%3D%20b%0A%0Ac1%20%3D%20ComplexClass%281,2%29&codeDivHeight=400&codeDivWidth=350&cumulative=false&curInstr=0&heapPrimitives=false&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false"> </iframe>')
c1 = ComplexClass(1,2)
print(c1, c1.real)
print(vars(c1), " ",type(c1))
c1.real=5.0
print(c1, " ", c1.real, " ", c1.imaginary)
"""
Explanation: __init__ is a special method run automatically by Python.
It is a constructor.
self is the instance of the object.
It acts like this in C++ but self is explicit.
End of explanation
"""
class Animal():
def __init__(self, name):
self.name = name
def make_sound(self):
raise NotImplementedError
class Dog(Animal):
def make_sound(self):
return "Bark"
class Cat(Animal):
def __init__(self, name):
self.name = "A very interesting cat: {}".format(name)
def make_sound(self):
return "Meow"
"""
Explanation: Inheritance and Polymorphism
Inheritance
Inheritance is the idea that a "Cat" is-a "Animal" and a "Dog" is-a "Animal".
Animals make sounds, but Cats Meow and Dogs Bark.
Inheritance makes sure that methods not defined in a child are found and used from a parent.
Polymorphism
Polymorphism is the idea that an interface is specified, but not necessarily implemented, by a superclass and then the interface is implemented in subclasses (differently).
[Actually Polymorphism is much more complex and interesting than this, and this definition is really an outcome of polymorphism. But we'll come to this later.]
Example: Super- and subclasses
End of explanation
"""
a0 = Animal("David")
print(a0.name)
a0.make_sound()
a1 = Dog("Snoopy")
a2 = Cat("Hello Kitty")
animals = [a1, a2]
for a in animals:
print(a.name)
print(isinstance(a, Animal))
print(a.make_sound())
print('--------')
print(a1.make_sound, " ", Dog.make_sound)
print(a1.make_sound())
print('----')
print(Dog.make_sound(a1))
Dog.make_sound()
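# Why the call above fails: looking up make_sound on the class itself gives a plain
# function that still expects an instance as its first argument (self), so
# Dog.make_sound(a1) works but Dog.make_sound() raises a TypeError.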
"""
Explanation: Animal is the superclass (a.k.a the base class).
Dog and Cat are both subclasses (a.k.a derived classes) of the Animal superclass.
Using the Animal class
End of explanation
"""
HTML('<iframe width="800" height="500" frameborder="0" src="http://pythontutor.com/iframe-embed.html#code=class%20Animal%28%29%3A%0A%20%20%20%20%0A%20%20%20%20def%20__init__%28self,%20name%29%3A%0A%20%20%20%20%20%20%20%20self.name%20%3D%20name%0A%20%20%20%20%20%20%20%20%0A%20%20%20%20def%20make_sound%28self%29%3A%0A%20%20%20%20%20%20%20%20raise%20NotImplementedError%0A%20%20%20%20%0Aclass%20Dog%28Animal%29%3A%0A%20%20%20%20%0A%20%20%20%20def%20make_sound%28self%29%3A%0A%20%20%20%20%20%20%20%20return%20%22Bark%22%0A%20%20%20%20%0Aclass%20Cat%28Animal%29%3A%0A%20%20%20%20%0A%20%20%20%20def%20__init__%28self,%20name%29%3A%0A%20%20%20%20%20%20%20%20self.name%20%3D%20%22A%20very%20interesting%20cat%3A%20%7B%7D%22.format%28name%29%0A%20%20%20%20%20%20%20%20%0A%20%20%20%20def%20make_sound%28self%29%3A%0A%20%20%20%20%20%20%20%20return%20%22Meow%22%0A%0Aa1%20%3D%20Dog%28%22Snoopy%22%29%0Aa2%20%3D%20Cat%28%22Hello%20Kitty%22%29%0Aanimals%20%3D%20%5Ba1,%20a2%5D%0Afor%20a%20in%20animals%3A%0A%20%20%20%20print%28a.name%29%0A%20%20%20%20print%28isinstance%28a,%20Animal%29%29%0A%20%20%20%20print%28a.make_sound%28%29%29%0A%20%20%20%20print%28\'--------\'%29&codeDivHeight=400&codeDivWidth=350&cumulative=false&curInstr=0&heapPrimitives=false&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false"> </iframe>')
"""
Explanation: How does this all work?
End of explanation
"""
class Animal():
def __init__(self, name):
self.name=name
print("Name is", self.name)
class Mouse(Animal):
def __init__(self, name):
self.animaltype="prey"
super().__init__(name)
print("Created %s as %s" % (self.name, self.animaltype))
class Cat(Animal):
pass
a1 = Mouse("Tom")
print(vars(a1))
a2 = Cat("Jerry")
print(vars(a2))
HTML('<iframe width="800" height="500" frameborder="0" src="http://pythontutor.com/iframe-embed.html#code=class%20Animal%28%29%3A%0A%20%20%20%20%0A%20%20%20%20def%20__init__%28self,%20name%29%3A%0A%20%20%20%20%20%20%20%20self.name%3Dname%0A%20%20%20%20%20%20%20%20print%28%22Name%20is%22,%20self.name%29%0A%20%20%20%20%20%20%20%20%0Aclass%20Mouse%28Animal%29%3A%0A%20%20%20%20def%20__init__%28self,%20name%29%3A%0A%20%20%20%20%20%20%20%20self.animaltype%3D%22prey%22%0A%20%20%20%20%20%20%20%20super%28%29.__init__%28name%29%0A%20%20%20%20%20%20%20%20print%28%22Created%20%25s%20as%20%25s%22%20%25%20%28self.name,%20self.animaltype%29%29%0A%20%20%20%20%0Aclass%20Cat%28Animal%29%3A%0A%20%20%20%20pass%0A%0Aa1%20%3D%20Mouse%28%22Tom%22%29%0Aa2%20%3D%20Cat%28%22Jerry%22%29&codeDivHeight=400&codeDivWidth=350&cumulative=false&curInstr=0&heapPrimitives=false&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false"> </iframe>')
"""
Explanation: Calling a superclass's initializer
Say we don't want to do all the work of setting the name variable in the subclasses.
We can set this "common" work up in the superclass and use super to call the superclass's initializer from the subclass.
There's another way to think about this:
A subclass method will be called instead of a superclass method if the method is in both the sub- and superclass and we call the subclass (polymorphism!).
If we really want the superclass method, then we can use the super built-in function.
See https://rhettinger.wordpress.com/2011/05/26/super-considered-super/
End of explanation
"""
# Both implement the "Animal" Protocol, which consists of the one make_sound function
class Dog():
def make_sound(self):
return "Bark"
class Cat():
def make_sound(self):
return "Meow"
a1 = Dog()
a2 = Cat()
animals = [a1, a2]
for a in animals:
print(isinstance(a, Animal), " ", a.make_sound())
"""
Explanation: Interfaces
The above examples show inheritance and polymorphism.
Notice that we didn't actually need to set up the inheritance.
We could have just defined 2 different classes and have them both make_sound.
In Java and C++ this is done more formally through Interfaces and Abstract Base Classes, respectively, plus inheritance.
In Python, this agreement to define make_sound is called duck typing.
"If it walks like a duck and quacks like a duck, it is a duck."
End of explanation
"""
class Animal():
def __init__(self, name):
self.name=name
def __repr__(self):
class_name = type(self).__name__
return "{0!s}({1.name!r})".format(class_name, self)
r = Animal("David")
r
print(r)
repr(r)
"""
Explanation: The Python Data Model
Duck typing is used throughout Python. Indeed, it's what enables the "Python Data Model".
All python classes implicitly inherit from the root object class.
The Pythonic way is to just document your interface and implement it.
This use of common interfaces is pervasive: the dunder methods that implement them make up the Python data model.
Example: Printing with __repr__ and __str__
The way printing works is that Python wants classes to implement __repr__ and __str__ methods.
It will use inheritance to give the built-in objects methods when these are not defined.
Any class can define __repr__ and __str__.
When an instance of such a class is interrogated with the repr or str function, then these underlying methods are called.
We'll see __repr__ here. If you define __repr__ you have made an object sensibly printable.
__repr__
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.19/_downloads/1935e973eb220e31cb4a6a6541231eb1/plot_background_statistics.ipynb | bsd-3-clause | # Authors: Eric Larson <[email protected]>
# License: BSD (3-clause)
from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa, analysis:ignore
import mne
from mne.stats import (ttest_1samp_no_p, bonferroni_correction, fdr_correction,
permutation_t_test, permutation_cluster_1samp_test)
print(__doc__)
"""
Explanation: Statistical inference
Here we will briefly cover multiple concepts of inferential statistics in an
introductory manner, and demonstrate how to use some MNE statistical functions.
:depth: 3
End of explanation
"""
width = 40
n_subjects = 10
signal_mean = 100
signal_sd = 100
noise_sd = 0.01
gaussian_sd = 5
sigma = 1e-3 # sigma for the "hat" method
n_permutations = 'all' # run an exact test
n_src = width * width
# For each "subject", make a smoothed noisy signal with a centered peak
rng = np.random.RandomState(2)
X = noise_sd * rng.randn(n_subjects, width, width)
# Add a signal at the center
X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd
# Spatially smooth with a 2D Gaussian kernel
size = width // 2 - 1
gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))
for si in range(X.shape[0]):
for ri in range(X.shape[1]):
X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
for ci in range(X.shape[2]):
X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
"""
Explanation: Hypothesis testing
Null hypothesis
^^^^^^^^^^^^^^^
From Wikipedia <https://en.wikipedia.org/wiki/Null_hypothesis>__:
In inferential statistics, a general statement or default position that
there is no relationship between two measured phenomena, or no
association among groups.
We typically want to reject a null hypothesis with
some probability (e.g., p < 0.05). This probability is also called the
significance level $\alpha$.
To think about what this means, let's follow the illustrative example from
[1]_ and construct a toy dataset consisting of a 40 x 40 square with a
"signal" present in the center with white noise added and a Gaussian
smoothing kernel applied.
End of explanation
"""
fig, ax = plt.subplots()
ax.imshow(X.mean(0), cmap='inferno')
ax.set(xticks=[], yticks=[], title="Data averaged over subjects")
"""
Explanation: The data averaged over all subjects looks like this:
End of explanation
"""
titles = ['t']
out = stats.ttest_1samp(X, 0, axis=0)
ts = [out[0]]
ps = [out[1]]
mccs = [False] # these are not multiple-comparisons corrected
def plot_t_p(t, p, title, mcc, axes=None):
if axes is None:
fig = plt.figure(figsize=(6, 3))
axes = [fig.add_subplot(121, projection='3d'), fig.add_subplot(122)]
show = True
else:
show = False
p_lims = [0.1, 0.001]
t_lims = -stats.distributions.t.ppf(p_lims, n_subjects - 1)
p_lims = [-np.log10(p) for p in p_lims]
# t plot
x, y = np.mgrid[0:width, 0:width]
surf = axes[0].plot_surface(x, y, np.reshape(t, (width, width)),
rstride=1, cstride=1, linewidth=0,
vmin=t_lims[0], vmax=t_lims[1], cmap='viridis')
axes[0].set(xticks=[], yticks=[], zticks=[],
xlim=[0, width - 1], ylim=[0, width - 1])
axes[0].view_init(30, 15)
cbar = plt.colorbar(ax=axes[0], shrink=0.75, orientation='horizontal',
fraction=0.1, pad=0.025, mappable=surf)
cbar.set_ticks(t_lims)
cbar.set_ticklabels(['%0.1f' % t_lim for t_lim in t_lims])
cbar.set_label('t-value')
cbar.ax.get_xaxis().set_label_coords(0.5, -0.3)
if not show:
axes[0].set(title=title)
if mcc:
axes[0].title.set_weight('bold')
# p plot
use_p = -np.log10(np.reshape(np.maximum(p, 1e-5), (width, width)))
img = axes[1].imshow(use_p, cmap='inferno', vmin=p_lims[0], vmax=p_lims[1],
interpolation='nearest')
axes[1].set(xticks=[], yticks=[])
cbar = plt.colorbar(ax=axes[1], shrink=0.75, orientation='horizontal',
fraction=0.1, pad=0.025, mappable=img)
cbar.set_ticks(p_lims)
cbar.set_ticklabels(['%0.1f' % p_lim for p_lim in p_lims])
cbar.set_label(r'$-\log_{10}(p)$')
cbar.ax.get_xaxis().set_label_coords(0.5, -0.3)
if show:
text = fig.suptitle(title)
if mcc:
text.set_weight('bold')
plt.subplots_adjust(0, 0.05, 1, 0.9, wspace=0, hspace=0)
mne.viz.utils.plt_show()
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
"""
Explanation: In this case, a null hypothesis we could test for each voxel is:
There is no difference between the mean value and zero
($H_0 \colon \mu = 0$).
The alternative hypothesis, then, is that the voxel has a non-zero mean
($H_1 \colon \mu \neq 0$).
This is a two-tailed test because the mean could be less than
or greater than zero, whereas a one-tailed test would test only one of
these possibilities, i.e. $H_1 \colon \mu \geq 0$ or
$H_1 \colon \mu \leq 0$.
<div class="alert alert-info"><h4>Note</h4><p>Here we will refer to each spatial location as a "voxel".
In general, though, it could be any sort of data value,
including cortical vertex at a specific time, pixel in a
time-frequency decomposition, etc.</p></div>
Parametric tests
^^^^^^^^^^^^^^^^
Let's start with a paired t-test, which is a standard test
for differences in paired samples. Mathematically, it is equivalent
to a 1-sample t-test on the difference between the samples in each condition.
The paired t-test is parametric
because it assumes that the underlying sample distribution is Gaussian, and
is only valid in this case. This happens to be satisfied by our toy dataset,
but is not always satisfied for neuroimaging data.
In the context of our toy dataset, which has many voxels
($40 \cdot 40 = 1600$), applying the paired t-test is called a
mass-univariate approach as it treats each voxel independently.
End of explanation
"""
ts.append(ttest_1samp_no_p(X, sigma=sigma))
ps.append(stats.distributions.t.sf(np.abs(ts[-1]), len(X) - 1) * 2)
titles.append(r'$\mathrm{t_{hat}}$')
mccs.append(False)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
"""
Explanation: "Hat" variance adjustment
~~~~~~~~~~~~~~~~~~~~~~~~~
The "hat" technique regularizes the variance values used in the t-test
calculation [1]_ to compensate for implausibly small variances.
End of explanation
"""
# Here we have to do a bit of gymnastics to get our function to do
# a permutation test without correcting for multiple comparisons:
X.shape = (n_subjects, n_src) # flatten the array for simplicity
titles.append('Permutation')
ts.append(np.zeros(width * width))
ps.append(np.zeros(width * width))
mccs.append(False)
for ii in range(n_src):
ts[-1][ii], ps[-1][ii] = permutation_t_test(X[:, [ii]], verbose=False)[:2]
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
"""
Explanation: Non-parametric tests
^^^^^^^^^^^^^^^^^^^^
Instead of assuming an underlying Gaussian distribution, we could instead
use a non-parametric resampling method. In the case of a paired t-test
between two conditions A and B, which is mathematically equivalent to a
one-sample t-test between the difference in the conditions A-B, under the
null hypothesis we have the principle of exchangeability. This means
that, if the null is true, we can exchange conditions and not change
the distribution of the test statistic.
When using a paired t-test, exchangeability thus means that we can flip the
signs of the difference between A and B. Therefore, we can construct the
null distribution values for each voxel by taking random subsets of
samples (subjects), flipping the sign of their difference, and recording the
absolute value of the resulting statistic (we record the absolute value
because we conduct a two-tailed test). The absolute value of the statistic
evaluated on the veridical data can then be compared to this distribution,
and the p-value is simply the proportion of null distribution values that
are smaller.
<div class="alert alert-danger"><h4>Warning</h4><p>In the case of a true one-sample t-test, i.e. analyzing a single
condition rather than the difference between two conditions,
it is not clear where/how exchangeability applies; see
`this FieldTrip discussion <ft_exch_>`_.</p></div>
In the case where n_permutations is large enough (or "all") so
that the complete set of unique resampling exchanges can be done
(which is $2^{N_{samp}}-1$ for a one-tailed and
$2^{N_{samp}-1}-1$ for a two-tailed test, not counting the
veridical distribution), instead of randomly exchanging conditions
the null is formed from using all possible exchanges. This is known
as a permutation test (or exact test).
End of explanation
"""
N = np.arange(1, 80)
alpha = 0.05
p_type_I = 1 - (1 - alpha) ** N
fig, ax = plt.subplots(figsize=(4, 3))
ax.scatter(N, p_type_I, 3)
ax.set(xlim=N[[0, -1]], ylim=[0, 1], xlabel=r'$N_{\mathrm{test}}$',
ylabel=u'Probability of at least\none type I error')
ax.grid(True)
fig.tight_layout()
fig.show()
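# A rough back-of-the-envelope check for the toy data above: with 1600 independent
# tests at alpha = 0.05, at least one false positive is essentially guaranteed, and
# about 0.05 * 1600 = 80 voxels would be expected to cross threshold by chance alone.
print('P(at least one type I error) for 1600 tests: %0.6f'
      % (1 - (1 - alpha) ** 1600))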
"""
Explanation: Multiple comparisons
So far, we have done no correction for multiple comparisons. This is
potentially problematic for these data because there are
$40 \cdot 40 = 1600$ tests being performed. If we use a threshold
p < 0.05 for each individual test, we would expect many voxels to be declared
significant even if there were no true effect. In other words, we would make
many type I errors (adapted from here <errors_>_):
.. rst-class:: skinnytable
  +----------+--------+------------------+------------------+
  |          |        |  Null hypothesis                    |
  |          |        +------------------+------------------+
  |          |        |       True       |       False      |
  +==========+========+==================+==================+
  |          |        | Type I error     | Correct          |
  |          | Yes    | False positive   | True positive    |
  + Reject   +--------+------------------+------------------+
  |          |        | Correct          | Type II error    |
  |          | No     | True Negative    | False negative   |
  +----------+--------+------------------+------------------+
To see why, consider a standard $\alpha = 0.05$.
For a single test, our probability of making a type I error is 0.05.
The probability of making at least one type I error in
$N_{\mathrm{test}}$ independent tests is then given by
$1 - (1 - \alpha)^{N_{\mathrm{test}}}$:
End of explanation
"""
titles.append('Bonferroni')
ts.append(ts[-1])
ps.append(bonferroni_correction(ps[0])[1])
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
"""
Explanation: To combat this problem, several methods exist. Typically these
provide control over either one of the following two measures:
Familywise error rate (FWER) <fwer_>_
The probability of making one or more type I errors:
.. math::
\mathrm{P}(N_{\mathrm{type\ I}} >= 1 \mid H_0)
False discovery rate (FDR) <fdr_>_
The expected proportion of rejected null hypotheses that are
actually true:
.. math::
\mathrm{E}(\frac{N_{\mathrm{type\ I}}}{N_{\mathrm{reject}}}
\mid N_{\mathrm{reject}} > 0) \cdot
\mathrm{P}(N_{\mathrm{reject}} > 0 \mid H_0)
We cover some techniques that control FWER and FDR below.
Bonferroni correction
^^^^^^^^^^^^^^^^^^^^^
Perhaps the simplest way to deal with multiple comparisons, Bonferroni
correction <https://en.wikipedia.org/wiki/Bonferroni_correction>__
conservatively multiplies the p-values by the number of comparisons to
control the FWER.
End of explanation
"""
titles.append('FDR')
ts.append(ts[-1])
ps.append(fdr_correction(ps[0])[1])
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
"""
Explanation: False discovery rate (FDR) correction
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Typically FDR is performed with the Benjamini-Hochberg procedure, which
is less restrictive than Bonferroni correction for large numbers of
comparisons (fewer type II errors), but provides less strict control of type
I errors.
End of explanation
"""
titles.append(r'$\mathbf{Perm_{max}}$')
out = permutation_t_test(X, verbose=False)[:2]
ts.append(out[0])
ps.append(out[1])
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
"""
Explanation: Non-parametric resampling test with a maximum statistic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Non-parametric resampling tests can also be used to correct for multiple
comparisons. In its simplest form, we again do permutations using
exchangeability under the null hypothesis, but this time we take the
maximum statistic across all voxels in each permutation to form the
null distribution. The p-value for each voxel from the veridical data
is then given by the proportion of null distribution values
that were smaller.
This method has two important features:
It controls FWER.
It is non-parametric. Even though our initial test statistic
(here a 1-sample t-test) is parametric, the null
distribution for the null hypothesis rejection (the mean value across
subjects is indistinguishable from zero) is obtained by permutations.
This means that it makes no assumptions of Gaussianity
(which do hold for this example, but do not in general for some types
of processed neuroimaging data).
End of explanation
"""
from sklearn.feature_extraction.image import grid_to_graph # noqa: E402
mini_connectivity = grid_to_graph(3, 3).toarray()
assert mini_connectivity.shape == (9, 9)
print(mini_connectivity[0])
"""
Explanation: Clustering
^^^^^^^^^^
Each of the aforementioned multiple comparisons corrections have the
disadvantage of not fully incorporating the correlation structure of the
data, namely that points close to one another (e.g., in space or time) tend
to be correlated. However, by defining the connectivity/adjacency/neighbor
structure in our data, we can use clustering to compensate.
To use this, we need to rethink our null hypothesis. Instead
of thinking about a null hypothesis about means per voxel (with one
independent test per voxel), we consider a null hypothesis about sizes
of clusters in our data, which could be stated like:
The distribution of spatial cluster sizes observed in two experimental
conditions are drawn from the same probability distribution.
Here we only have a single condition and we contrast to zero, which can
be thought of as:
The distribution of spatial cluster sizes is independent of the sign
of the data.
In this case, we again do permutations with a maximum statistic, but, under
each permutation, we:
Compute the test statistic for each voxel individually.
Threshold the test statistic values.
Cluster voxels that exceed this threshold (with the same sign) based on
adjacency.
Retain the size of the largest cluster (measured, e.g., by a simple voxel
count, or by the sum of voxel t-values within the cluster) to build the
null distribution.
After doing these permutations, the cluster sizes in our veridical data
are compared to this null distribution. The p-value associated with each
cluster is again given by the proportion of smaller null distribution
values. This can then be subjected to a standard p-value threshold
(e.g., p < 0.05) to reject the null hypothesis (i.e., find an effect of
interest).
This reframing to consider cluster sizes rather than individual means
maintains the advantages of the standard non-parametric permutation
test -- namely controlling FWER and making no assumptions of parametric
data distribution.
Critically, though, it also accounts for the correlation structure in the
data -- which in this toy case is spatial but in general can be
multidimensional (e.g., spatio-temporal) -- because the null distribution
will be derived from data in a way that preserves these correlations.
.. sidebar:: Effect size
For a nice description of how to compute the effect size obtained
in a cluster test, see this
`FieldTrip mailing list discussion <ft_cluster_effect_size_>`_.
However, there is a drawback. If a cluster significantly deviates from
the null, no further inference on the cluster (e.g., peak location) can be
made, as the entire cluster as a whole is used to reject the null.
Moreover, because the test statistic concerns the full data, the null
hypothesis (and our rejection of it) refers to the structure of the full
data. For more information, see also the comprehensive
FieldTrip tutorial <ft_cluster_>_.
Defining the connectivity/neighbor/adjacency matrix
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
First we need to define our connectivity/neighbor/adjacency matrix.
This is a square array (or sparse matrix) of shape (n_src, n_src) that
contains zeros and ones to define which spatial points are connected, i.e.,
which voxels are adjacent to each other. In our case this
is quite simple, as our data are aligned on a rectangular grid.
Let's pretend that our data were smaller -- a 3 x 3 grid. Thinking about
each voxel as being connected to the other voxels it touches, we would
need a 9 x 9 connectivity matrix. The first row of this matrix contains the
voxels in the flattened data that the first voxel touches. Since it touches
the second element in the first row and the first element in the second row
(and is also a neighbor to itself), this would be::
[1, 1, 0, 1, 0, 0, 0, 0, 0]
:mod:sklearn.feature_extraction provides a convenient function for this:
End of explanation
"""
titles.append('Clustering')
# Reshape data to what is equivalent to (n_samples, n_space, n_time)
X.shape = (n_subjects, width, width)
# Compute threshold from t distribution (this is also the default)
threshold = stats.distributions.t.ppf(1 - alpha, n_subjects - 1)
t_clust, clusters, p_values, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold, connectivity=None,
n_permutations=n_permutations)
# Put the cluster data in a viewable format
p_clust = np.ones((width, width))
for cl, p in zip(clusters, p_values):
p_clust[cl] = p
ts.append(t_clust)
ps.append(p_clust)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
"""
Explanation: In general the connectivity between voxels can be more complex, such as
those between sensors in 3D space, or time-varying activation at brain
vertices on a cortical surface. MNE provides several convenience functions
for computing connectivity/neighbor/adjacency matrices (see the
Statistics API <api_reference_statistics>).
Standard clustering
~~~~~~~~~~~~~~~~~~~
Here, since our data are on a grid, we can use connectivity=None to
trigger optimized grid-based code, and run the clustering algorithm.
End of explanation
"""
titles.append(r'$\mathbf{C_{hat}}$')
stat_fun_hat = partial(ttest_1samp_no_p, sigma=sigma)
t_hat, clusters, p_values, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold, connectivity=None,
n_permutations=n_permutations, stat_fun=stat_fun_hat, buffer_size=None)
p_hat = np.ones((width, width))
for cl, p in zip(clusters, p_values):
p_hat[cl] = p
ts.append(t_hat)
ps.append(p_hat)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
"""
Explanation: "Hat" variance adjustment
~~~~~~~~~~~~~~~~~~~~~~~~~
This method can also be used in this context to correct for small
variances [1]_:
End of explanation
"""
titles.append(r'$\mathbf{C_{TFCE}}$')
threshold_tfce = dict(start=0, step=0.2)
t_tfce, _, p_tfce, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold_tfce, connectivity=None,
n_permutations=n_permutations)
ts.append(t_tfce)
ps.append(p_tfce)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
"""
Explanation: Threshold-free cluster enhancement (TFCE)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
TFCE eliminates the free initial-threshold parameter that determines
which points are included in clustering. It does so by approximating
a continuous integration across possible threshold values with a standard
Riemann sum <https://en.wikipedia.org/wiki/Riemann_sum>__ [2]_.
This requires giving a starting threshold start and a step
size step, which in MNE is supplied as a dict.
The smaller the step and closer to 0 the start value,
the better the approximation, but the longer it takes.
A significant advantage of TFCE is that, rather than modifying the
statistical null hypothesis under test (from one about individual voxels
to one about the distribution of clusters in the data), it modifies the data
under test while still controlling for multiple comparisons.
The statistical test is then done at the level of individual voxels rather
than clusters. This allows for evaluation of each point
independently for significance rather than only as cluster groups.
End of explanation
"""
titles.append(r'$\mathbf{C_{hat,TFCE}}$')
t_tfce_hat, _, p_tfce_hat, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold_tfce, connectivity=None,
n_permutations=n_permutations, stat_fun=stat_fun_hat, buffer_size=None)
ts.append(t_tfce_hat)
ps.append(p_tfce_hat)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
"""
Explanation: We can also combine TFCE and the "hat" correction:
End of explanation
"""
fig = plt.figure(facecolor='w', figsize=(14, 3))
assert len(ts) == len(titles) == len(ps)
for ii in range(len(ts)):
ax = [fig.add_subplot(2, 10, ii + 1, projection='3d'),
fig.add_subplot(2, 10, 11 + ii)]
plot_t_p(ts[ii], ps[ii], titles[ii], mccs[ii], ax)
fig.tight_layout(pad=0, w_pad=0.05, h_pad=0.1)
plt.show()
"""
Explanation: Visualize and compare methods
Let's take a look at these statistics. The top row shows each test statistic,
and the bottom shows p-values for various statistical tests, with the ones
with proper control over FWER or FDR with bold titles.
End of explanation
"""
|
JeromeRisselin/PRJ-medtec_sigproc | SigProc_101/SigProc-101-pimped.ipynb | mit | from __future__ import print_function
import numpy as np
from PIL import Image # for bmp import
from glob import glob
from scipy.misc import imresize
import matplotlib.pyplot as plt
import math
import time
%matplotlib inline
def showImage(imageToPlot):
plt.figure(figsize=(2, 4))
plt.gray()
plt.imshow(imageToPlot.reshape(imageToPlot.shape), aspect='auto')
plt.show()
def normImag(A):
# Let's normalize the image
A = A - A.min()
A = 1.0*A/A.max()
return(A)
"""
Explanation: Challenge : envelope detection
The aim of this challenge is to determine the best envelope detection algorithm for the echOpen probe's raw signal.
This challenge is meant as a starting point for experimenting with standard envelope detection algorithms, so as to gather knowledge about state-of-the-art techniques that could later improve the echOpen preprocessing.
In this challenge we'll use raw data simulated from images by modulating the signal with a sinusoidal function whose frequency matches the echOpen piezo frequency. This is obviously an "ideal" case, but it lets us test different implementations while monitoring the error between the envelope detection algorithm and the ground-truth image from which the raw data were simulated.
Eventually, the implementations retained at the end of this challenge will be tested on real echOpen raw data. This will let us measure each algorithm's performance in terms of "image quality" (impact on spatial resolution, for example).
Pipeline
The scheme below sums up the different steps that are gone through in this notebook :
Starting from an ultrasound image of good quality ("ground truth"), we apply an amplitude modulation by multiplying each line by a sinusoidal signal. This simulates the raw signal that would lead to each line in the image.
We implemented a very basic reconstruction algorithm that will serve as a baseline (i.e. you should do better!). The reconstruction function takes as input the simulated raw signal and performs envelope detection to get an image.
We compute the error map between the reconstructed image and the ground truth, and assign the reconstruction method a score equal to the sum of squared errors over all pixels.
At the very end of the notebook is a reconstructImage() function in which you can implement your own envelope detection method. Then you'll be able to directly evaluate the score associated with your method. Just play with the reconstructImage() function and try to lower the error!
Once you're satisfied with your method, you can submit your reconstructImage() to the leaderboard by following the instructions at the end of this notebook.
<img src="files/pipeline.png">
Loading useful libraries
End of explanation
"""
im = Image.open("fantom.bmp").convert('L') # convert 'L' is to get a flat image, not RGB
groundTruth = normImag(np.array(im)) # we use the full [0;1] range
showImage(groundTruth)
"""
Explanation: Loading and studying the 342x719 image of fantom
Here we load the original image that will serve as "ground truth" that we would like to achieve. This image will later be altered in a way that allows to simulate a raw signal (i.e. the image before envelope detection is performed).
End of explanation
"""
depth = 0.13 # in meters
resolution = groundTruth.shape[0]/depth # in pts/m
t = depth*2.0/1450.0
print('Image resolution in pixels/mm : ', resolution/1000.0)
print('Listening time in micro-secs : ', t*1.0E6)
"""
Explanation: Let's assume vertical line points are spaced by 1cm each. This corresponds to a depth of about 13cm.
End of explanation
"""
sps = 60.0E6
f = 3.5E6
L = int(t*sps)
print("Number of points in raw signal : ", L)
"""
Explanation: The corresponding resolution is 5.53 pts / mm. At a speed of 1450m/s for sound, we'd have a listening time of around 180µs of recording.
Simulating a raw signal that would lead to this "ground truth" image
Let's assume an ADC sampling rate of 60Msps (close to our prototype) and a piezo frequency f = 3.5 MHz, and compute the length of the raw signal :
End of explanation
"""
# First create a table of L points for each line, from the original image, by using bicubic interpolation
# This is to get a smoother and more realistic raw signal
BigImg = imresize(groundTruth, ( L,groundTruth.shape[1]), interp='bicubic')
# Then simulate raw signal by modulating the data of BigImg with a sinusoidal function,
# the frequence of which corresponds to the piezo frequency
rawSignal = np.zeros(shape=(L,groundTruth.shape[1]))
for i in range(len(rawSignal)):
for j in range(len(rawSignal[0])):
pixelValue = 1.0*BigImg[i][j]
w = 2.0*math.radians(180)*f
rawSignal[i][j] = pixelValue*math.cos(1.0*i*w/sps)
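# Editor's note (a sketch, not part of the original challenge code): the double loop above
# can be vectorized; with the same BigImg, f and sps this builds the identical array in one
# step, which matters once you start iterating on reconstruction methods.
carrier = np.cos(np.arange(L) * 2.0 * math.radians(180) * f / sps)
rawSignalVectorized = BigImg * carrier[:, np.newaxis]
# np.allclose(rawSignal, rawSignalVectorized) should hold if you want to double-check.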
"""
Explanation: The corresponding length of raw signal is close to 11k points.
We can then recreate the raw signal image :
End of explanation
"""
line = np.zeros(shape=(L))
imageLine = np.zeros(shape=(L))
for i in range(len(rawSignal)):
line[i] = rawSignal[i][10]
imageLine[i] = BigImg[i][10]
plt.plot(line)
plt.plot(imageLine)
plt.show()
"""
Explanation: Let's check that we have the image (in green) and the corresponding signal (in blue) :
End of explanation
"""
maxFreq = 6.0E6
xLimit = int(L*maxFreq/sps) # upper cap: only display frequencies up to maxFreq
lineFFT = np.abs(np.fft.fft(line))
xScale = list(range(xLimit))  # list() so the items can be assigned under Python 3 as well
for i in range(xLimit):
xScale[i] = sps*float(xScale[i])/(L*(1.0E6))  # frequency axis in MHz
plt.plot(xScale,lineFFT[0:xLimit])
plt.xlabel('Frequency (MHz)')
plt.show()
"""
Explanation: Let's analyse this signal in the frequency domain, through a FFT. We should see the image, modulated by the 3.5MHz. That is, a "potato" around a 3.5MHz peak :
End of explanation
"""
# Let's save the raw signal data
np.savetxt("RawSignal.csv.gz",rawSignal, delimiter=';')
"""
Explanation: Conclusion: our rawSignal matches the raw signal's characteristics for the fantom image !
Saving the raw signal into a file for use in a different code
Let's save the raw signal data into a compressed .csv file so that you'll be able to load it from different code (e.g. if you're not at ease with Python, you can write your own script in whatever language you want to implement the envelope detection algorithm). Note that np.savetxt() and np.loadtxt() transparently accept gz files.
End of explanation
"""
def ssd(A,B):
A = A - 0.95*A.min()
A = 1.0*A/A.max()
B = B - 0.95*B.min()
B = 1.0*B/B.max()
squares = (A[:,:] - B[:,:]) ** 2
return np.sum(squares)
def estimateScore(groundTruth, reconstructedImage) :
errorMap = (groundTruth - reconstructedImage)
print('Error map between ground truth and reconstructed image : ')
showImage(errorMap)
score = ssd(reconstructedImage,groundTruth)
maxErr = errorMap.max()
return [score,maxErr]
def compareImages(im1,im2) :
plt.figure()
ax = plt.subplot(1, 2, 1)
plt.imshow(im1)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(1, 2, 2)
plt.imshow(im2)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
"""
Explanation: Envelope detection challenge
Below are the pieces of code related to the envelope detection itself.
First, some utility functions are defined to display and compare images and to assess performance.
A basic decimation algorithm is then implemented. This method serves as a baseline: you're supposed to do better!
You'll have to define your own envelope detection method.
An automated score estimation and comparison between the baseline and your algorithm are provided.
Score estimation and comparison functions
The estimateScore() function computes the error map between reconstructed image and the ground truth, and returns a score associated to this error map, as well as the max error achieved on a given pixel.
You should retain the algorithm that achieves the lowest possible value for these scores.
End of explanation
"""
def reconstructBaseline(rawSignal,image_shape) :
reconstructedImage = np.zeros(shape=(image_shape[0],image_shape[1]))
decimationFactor = 1.0*rawSignal.shape[0]/image_shape[0]
for i in range(rawSignal.shape[0]):
for j in range(image_shape[1]):
reconstructedImage[int(i/decimationFactor)][j] += np.abs(rawSignal[i][j])
reconstructedImage = normImag(np.abs(reconstructedImage))
return reconstructedImage
"""
Explanation: Baseline method (don't change this one ! )
This function implements a basic decimation.
End of explanation
"""
reconBaseline = reconstructBaseline(rawSignal,groundTruth.shape)
compareImages(groundTruth, reconBaseline)
[scoreBaseline,maxErrBaseline] = estimateScore(groundTruth, reconBaseline)
print('Score for Baseline method : ', scoreBaseline)
print('max Err between pixels for Baseline method : ', maxErrBaseline)
"""
Explanation: Let's compare the image reconstructed with the baseline method, with the ground truth to achieve :
End of explanation
"""
def reconstructImage(rawSignal,image_shape) :
# Here is a copy of the baseline method. Replace that by another method.
reconstructedImage = np.zeros(shape=(image_shape[0],image_shape[1]))
decimationFactor = 1.0*rawSignal.shape[0]/image_shape[0]
for i in range(rawSignal.shape[0]):
for j in range(image_shape[1]):
reconstructedImage[int(i/decimationFactor)][j] += np.abs(rawSignal[i][j])
reconstructedImage = normImag(np.abs(reconstructedImage))
# The function should return the reconstructed image
return reconstructedImage
"""
Explanation: Your turn : implement your own method in the function below
End of explanation
"""
recon = reconstructImage(rawSignal,groundTruth.shape)
compareImages(groundTruth, recon)
[score,maxErr] = estimateScore(groundTruth, recon)
print('Score for your method : ', score)
print('max Err between pixels for your method : ', maxErr)
"""
Explanation: Performance assessment of your method
End of explanation
"""
def install_packages():
import pip
pip.main(['install', 'scipy'])
def run(rawSignal,image_shape) :
import numpy as np
from scipy.signal import hilbert
reconstructedImage = np.zeros(shape=(image_shape[0],image_shape[1]))
analytic_signal = hilbert(rawSignal)
amplitude_envelope = np.abs(analytic_signal)
decimationFactor = 1.0*amplitude_envelope.shape[0]/image_shape[0]
old_pixel = 0
nb_points=0
for i in range(amplitude_envelope.shape[0]):
for j in range(image_shape[1]):
reconstructedImage[int(i/decimationFactor)][j] += np.abs(amplitude_envelope[i][j])
if (int(i/decimationFactor) == old_pixel):
nb_points += 1
else:
nb_points += 1
reconstructedImage[int(i/decimationFactor)-1] = reconstructedImage[int(i/decimationFactor)-1]/nb_points
nb_points = 1
old_pixel = old_pixel+1
reconstructedImage = normImag(np.abs(reconstructedImage))
# The function should return the reconstructed image
return reconstructedImage
"""
Explanation: Submitting your own method to the leaderboard
To submit your own implementation to our leaderboard and compare your performances to other teams, go to http://37.187.117.106:8888/.
Subscribe to the leaderboard
Go to the IDE and paste your code, in the same form as the example provided in the cell below. The code should at least include the definition of a function "run(rawSignal,image_shape)" where :
rawData is a numpy.array containing the raw signal values (in the same format as in this notebook)
imageShape is an array [imageLength, imageWidth] with dimensions of the reconstructed image
the function should return a numpy.array of shape [imageLength, imageWidth] containing the reconstructed image values
It's possible to install python packages via pip, by defining a "install_packages()" function. The imports should then be done in the run function.
Click on the "submit" button.
After some time, a notification will inform you about your score and your ranking will appear in the leaderboard.
You can submit the example code in the cell below. This implementation should lead to a score of 12481.6872689
End of explanation
"""
|
hvillanua/deep-learning | gan_mnist/Intro_to_GANs_Solution.ipynb | mit | %matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
"""
Explanation: Generative Adversarial Network
In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!
GANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:
Pix2Pix
CycleGAN
A whole list
The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator: it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator.
The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to construct its fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.
The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates a real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.
End of explanation
"""
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real')
inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
return inputs_real, inputs_z
"""
Explanation: Model Inputs
First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.
End of explanation
"""
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
with tf.variable_scope('generator', reuse=reuse):
# Hidden layer
h1 = tf.layers.dense(z, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1, h1)
# Logits and tanh output
logits = tf.layers.dense(h1, out_dim, activation=None)
out = tf.tanh(logits)
return out
"""
Explanation: Generator network
Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.
Variable Scope
Here we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.
We could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.
To use tf.variable_scope, you use a with statement:
python
with tf.variable_scope('scope_name', reuse=False):
# code here
Here's more from the TensorFlow documentation to get another look at using tf.variable_scope.
Leaky ReLU
TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one. For this you can take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:
$$
f(x) = max(\alpha * x, x)
$$
Tanh Output
The generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.
End of explanation
"""
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
with tf.variable_scope('discriminator', reuse=reuse):
# Hidden layer
h1 = tf.layers.dense(x, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1, h1)
logits = tf.layers.dense(h1, 1, activation=None)
out = tf.sigmoid(logits)
return out, logits
"""
Explanation: Discriminator
The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.
End of explanation
"""
# Size of input image to discriminator
input_size = 784
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Smoothing
smooth = 0.1
"""
Explanation: Hyperparameters
End of explanation
"""
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Build the model
g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)
# g_model is the generator output
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, n_units=d_hidden_size, alpha=alpha)
"""
Explanation: Build network
Now we're building the network from the functions defined above.
First is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.
Then, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.
Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).
End of explanation
"""
# Calculate losses
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
labels=tf.ones_like(d_logits_real) * (1 - smooth)))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.zeros_like(d_logits_real)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.ones_like(d_logits_fake)))
"""
Explanation: Discriminator and Generator Losses
Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will be sigmoid cross-entropies, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like
python
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
For the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)
The discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.
Finally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants the discriminator to output ones for fake images.
End of explanation
"""
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
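# Editor's note (a small sanity check, not in the original notebook): the scope-based split
# described in the explanation just below should give two disjoint variable lists that
# together cover every trainable variable in the graph.
assert len(g_vars) + len(d_vars) == len(t_vars)
assert not set(v.name for v in g_vars) & set(v.name for v in d_vars)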
"""
Explanation: Optimizers
We want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.
For the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep the variables that start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance).
We can do something similar with the discriminator. All the variables in the discriminator start with discriminator.
Then, in the optimizer we pass the variable lists to var_list in the minimize method. This tells the optimizer to only update the listed variables. Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.
End of explanation
"""
batch_size = 100
epochs = 100
samples = []
losses = []
# Only save generator variables
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images, reshape and rescale to pass to D
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images*2 - 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
# At the end of each epoch, get the losses and print them out
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# Sample from generator as we're training for viewing afterwards
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
"""
Explanation: Training
End of explanation
"""
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
"""
Explanation: Training loss
Here we'll check out the training losses for the generator and discriminator.
End of explanation
"""
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')
return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
"""
Explanation: Generator samples from training
Here we can view samples of images from the generator. First we'll look at images taken while training.
End of explanation
"""
_ = view_samples(-1, samples)
"""
Explanation: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 1, 7, 3, 2. Since this is just a sample, it isn't representative of the full range of images this generator can make.
End of explanation
"""
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28,28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
"""
Explanation: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
End of explanation
"""
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
_ = view_samples(0, [gen_samples])
"""
Explanation: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise like 1s and 9s.
Sampling from the generator
We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!
End of explanation
"""
|
desihub/desisim | doc/nb/bgs-archetypes.ipynb | bsd-3-clause | import os
import numpy as np
import matplotlib.pyplot as plt
from desispec.io.util import write_bintable, makepath
from desisim.io import write_templates
from desisim.archetypes import compute_chi2, ArcheTypes
import multiprocessing
nproc = multiprocessing.cpu_count() // 2
plt.style.use('seaborn-talk')
%matplotlib inline
"""
Explanation: BGS Archetypes
The goal of this notebook is to derive a set of spectral archetypes from the BGS template set using Guangtun Zhu's SetCoverPy algorithm.
Preliminaries.
End of explanation
"""
seed = 123
rand = np.random.RandomState(seed)
"""
Explanation: Initialize the random seed so the results are reproducible, below.
End of explanation
"""
version = 'v1.0'
outdir = os.path.join(os.getenv('DESI_ROOT'), 'spectro', 'templates', 'archetypes', 'bgs', version)
print('Setting output directory to {}'.format(outdir))
os.makedirs(outdir, exist_ok=True)
chi2file = os.path.join(outdir, 'bgs_archetypes_chi2_{}.fits'.format(version))
archfile = os.path.join(outdir, 'bgs_archetypes_{}.fits'.format(version))
"""
Explanation: Output path and filenames.
End of explanation
"""
def _build_templates(args):
"""Filler function for the multiprocessing."""
return build_templates(*args)  # return the result so pool.map() actually gets the templates back
def build_templates(bgs, input_meta, verbose=False):
flux, _, meta = bgs.make_templates(input_meta=input_meta, novdisp=True,
nocolorcuts=True, verbose=verbose)
return [flux.astype('f4'), meta]
def read_and_normalize(verbose=False, nproc=1, minwave=1200, maxwave=2e4,
cdelt=0.2, nominal_rmag=18.0, nominal_vdisp=1000,
subset=False):
"""Read and normalize the full set of basis templates.
"""
from astropy.table import vstack
from desisim.templates import BGS
from desisim.io import empty_metatable
bgs = BGS(minwave=minwave, maxwave=maxwave, cdelt=cdelt)
bgs.normline = None # no emission line
nspec = len(bgs.basemeta)
if subset:
nspec = 1000
these = rand.choice(len(bgs.basemeta), nspec)
print('Selecting a subset of {} / {} templates!'.format(nspec, len(bgs.basemeta)))
else:
these = np.arange(nspec)
input_meta = empty_metatable(nmodel=nspec, objtype='BGS')
input_meta['TEMPLATEID'] = these
input_meta['REDSHIFT'] = 0.0
input_meta['MAG'] = nominal_rmag
input_meta['VDISP'] = nominal_vdisp
input_meta['SEED'] = rand.randint(2**32, size=nspec)
# Not sure why multiprocessing isn't working in this case.
if nproc > 1:
chunk = np.array_split(these, nproc)
tempargs = list()
for ii in range(nproc):
tempargs.append((bgs, input_meta[chunk[ii]], verbose))
pool = multiprocessing.Pool(nproc)
out = pool.map(_build_templates, tempargs)
flux = np.vstack([res[0] for res in out])  # each pool result is a [flux_chunk, meta_chunk] pair
meta = vstack([res[1] for res in out])
else:
flux, meta = build_templates(bgs, input_meta, verbose)
nspec, npix = flux.shape
print('Generated {} rest-frame BGS spectra with {} pixels.'.format(nspec, npix))
return flux, bgs.wave, meta, bgs.basemeta[these]
%time flux, wave, meta, basemeta = read_and_normalize(nproc=1, cdelt=1.0, minwave=3600, maxwave=7000, subset=True)
nspec, npix = flux.shape
%time hiresflux, hireswave, _, _ = read_and_normalize(nproc=1, cdelt=0.2, minwave=1200, maxwave=2e4, subset=True)
_, hiresnpix = hiresflux.shape
def plot_subset(nplot=25, ncol=5):
"""Plot a random sampling of the basis templates."""
nspec, npix = flux.shape
nrow = np.ceil(nplot / ncol).astype('int')
these = rand.choice(nspec, nplot, replace=False)
fig, ax = plt.subplots(nrow, ncol, figsize=(2.2*ncol, 2.2*nrow), sharey=True, sharex=True)
for thisax, indx in zip(ax.flat, these):
thisax.plot(wave, flux[indx, :])
thisax.text(0.95, 0.93, '{:0d}'.format(indx), ha='right',
va='top', transform=thisax.transAxes, fontsize=11)
thisax.xaxis.set_major_locator(plt.MaxNLocator(3))
fig.subplots_adjust(wspace=0.05, hspace=0.05)
plot_subset()
"""
Explanation: Read the BGS basis templates.
Read both a set of lower-resolution (1 A/pix) templates sampled over a restricted wavelength range (roughly 3500-7000 A) and the same set at higher resolution (0.2 A/pix) and over the wavelength range 0.12-2 micron. The lower-resolution templates will be used to determine the archetypes (since speed is an issue) while the full-resolution templates is what we actually write out.
In both cases we (arbitrarily) normalize every template to r=18 and adopt a nominal velocity dispersion of 100 km/s.
End of explanation
"""
def write_chi2(chi2):
from astropy.io import fits
print('Writing {}'.format(chi2file))
hdu = fits.PrimaryHDU(chi2)
hdu.writeto(chi2file, overwrite=True)
%time chi2, amp = compute_chi2(flux)
write_chi2(chi2)
prec = 0.1
chi2min_nominal = npix*prec**2
print(chi2min_nominal, np.log10(chi2min_nominal)) # seems high...
with np.errstate(divide='ignore'):
logchi2 = np.log10(chi2)
logchi2[chi2 == 0] = -1
fig, ax = plt.subplots(1, 2, figsize=(12, 4))
im = ax[0].imshow(logchi2, origin='lower', interpolation='nearest',
vmin=-1.0, cmap='viridis')
ax[0].set_xlabel('Spectrum Number')
ax[0].set_ylabel('Spectrum Number')
plt.colorbar(im, label='$log_{10}(\chi^{2})$', ax=ax[0])
_ = ax[1].hist(logchi2.reshape(nspec * nspec), bins=30, range=(-1.2, np.max(logchi2)))
ax[1].set_ylabel('Number')
ax[1].set_xlabel('$log_{10}(\chi^{2}$)')
"""
Explanation: Compute the NxN chi2 matrix.
We use chi2 as the "distance" matrix for the Set Cover problem.
Then, we need to determine what threshold chi2 value differentiates "different" templates.
Note that the threshold chi^2 value can be tuned until the desired number of archetypes is achieved. However, If we want the archetypes to describe each spectrum in the parent sample to a precision of prec=0.1 (10%) then we we should set chi2min to be approximately npix*prec^2.
End of explanation
"""
def narch_vs_chi2min(Arch):
"""Determine the number of archtypes vs chi2 threshold.
"""
cost = np.ones(nspec) # uniform cost
chi2min = np.logspace(1, 5, 10)
print(chi2min)
narch = np.zeros_like(chi2min)
for ii, cmin in enumerate(chi2min):
iarch = Arch.get_archetypes(chi2_thresh=cmin)
narch[ii] = len(iarch)
return narch, chi2min
def qa_narch_vs_chi2min():
fig, ax = plt.subplots()
ax.scatter(np.log10(chi2min), narch)
ax.set_xlabel('$log_{10}(\chi^{2})$ Threshold')
ax.set_ylabel('Number of Archetypes')
ax.axvline(x=np.log10(chi2min_nominal), color='red', ls='-')
ax.grid(True)
Arch = ArcheTypes(chi2)
narch, chi2min = narch_vs_chi2min(Arch)
qa_narch_vs_chi2min()
"""
Explanation: Compute and plot the number of archetypes vs chi2 threshold.
End of explanation
"""
def write_archetypes():
"""ToDo: Write out the responsibility indices for each archetype."""
from astropy.table import Column
outmeta = meta[iarch]
outmeta.add_column(Column(name='RESPONSIBILITY', length=len(iarch), dtype='int8'))
outmeta['RESPONSIBILITY'] = resp
print('Writing {}'.format(archfile))
write_templates(archfile, hiresflux[iarch, :], hireswave, outmeta, objtype='BGS Archetypes')
chi2_thresh = 10**2.5
print('Choosing a log10(chi2) threshold value of {:.1f}.'.format(np.log10(chi2_thresh)))
_iarch, _resp, _respindx = Arch.get_archetypes(chi2_thresh=chi2_thresh, responsibility=True)
print('Generated {} archetypes.'.format(len(_iarch)))
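# Editor's note (sketch): by the chi2min ~ npix * prec**2 heuristic used earlier, this
# threshold corresponds to a per-pixel fractional precision of roughly sqrt(chi2_thresh / npix).
print('Implied fractional precision: {:.3f}'.format(np.sqrt(chi2_thresh / npix)))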
"""
Explanation: Choose a chi2 threshold value then get the final set of archetypes.
End of explanation
"""
srt = np.argsort(meta['D4000'][_iarch])
iarch = _iarch[srt]
resp = _resp[srt]
respindx = []
for ss in srt:
respindx.append(_respindx[ss])
write_archetypes()
"""
Explanation: Sort by Dn(4000).
End of explanation
"""
def _markers():
d4000 = meta['D4000']
size = 110 * (1+(resp - resp.min()) / resp.ptp())
shade = (d4000[iarch] - d4000[iarch].min()) / d4000[iarch].ptp()
col = plt.cm.coolwarm(shade)
return size, col
def qa_responsibility():
"""Generate a color-color plot with the symbol size scaled by the responsibility.
"""
rz = -2.5 * np.log10(meta['FLUX_R'] / meta['FLUX_Z'])
d4000 = meta['D4000']
size, col = _markers()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(11, 4), sharey=True)
ax1.scatter(rz[iarch], resp, c=col, marker='o', s=size,
edgecolor='k')
ax1.set_xlabel('r - z')
ax1.set_ylabel('Responsibility')
ax1.grid(True)
ax2.scatter(d4000[iarch], resp, c=col, marker='o', s=size,
edgecolor='k')
ax2.set_xlabel('$D_{n}(4000)$')
ax2.grid(True)
fig.subplots_adjust(wspace=0.05)
qa_responsibility()
def qa_colorcolor():
"""Generate a color-color plot with the symbol size scaled by the responsibility.
"""
gr = -2.5 * np.log10(meta['FLUX_G'] / meta['FLUX_R'])
rz = -2.5 * np.log10(meta['FLUX_R'] / meta['FLUX_Z'])
d4000 = meta['D4000']
size, col = _markers()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
ax1.scatter(rz, gr, s=30, c='lightgray', edgecolor='k')
ax1.scatter(rz[iarch], gr[iarch], c=col, marker='o', s=size,
alpha=1, edgecolor='k')
ax1.set_xlabel(r'$r - z$')
ax1.set_ylabel(r'$g - r$')
ax1.grid(True)
ax2.scatter(d4000, rz, s=30, c='lightgray', edgecolor='k')
ax2.scatter(d4000[iarch], rz[iarch], c=col, marker='o', s=size,
alpha=1, edgecolor='k')
ax2.set_xlabel('$D_{n}(4000)$')
ax2.set_ylabel(r'$g - r$')
ax2.grid(True)
fig.subplots_adjust(wspace=0.3)
qa_colorcolor()
def qa_archetypes(ncol=5, nfilter=11):
"""Plot the archetypes and the spectra for which they're responsible."""
from scipy.signal import medfilt
_, col = _markers()
narch = len(iarch)
nrow = np.ceil(narch / ncol).astype('int')
fig, ax = plt.subplots(nrow, ncol, figsize=(2.5*ncol, 2.5*nrow), sharey=True, sharex=True)
ww = (hireswave > 3000) * (hireswave < 1e4)
for jj, (thisax, indx, rindx, rr) in enumerate(zip(ax.flat, iarch, respindx, resp)):
if rr > 1:
for ii in rindx:
thisax.plot(hireswave[ww], hiresflux[ii, ww], color='lightgrey')
smoothflux = medfilt(hiresflux[indx, ww], 11)
thisax.plot(hireswave[ww], smoothflux, color=col[jj])
thisax.xaxis.set_major_locator(plt.MaxNLocator(2))
thisax.text(0.95, 0.93, '{:04d}\nResp={}'.format(indx, rr), ha='right',
va='top', transform=thisax.transAxes, fontsize=11)
fig.subplots_adjust(wspace=0.05, hspace=0.05)
qa_archetypes()
def qa_ages_colormag():
"""Generate a color-magnitude plot for the original AGES sample.
"""
Mr = basemeta['SDSS_UGRIZ_ABSMAG_Z01'][:, 2]
gr = basemeta['SDSS_UGRIZ_ABSMAG_Z01'][:, 1]-basemeta['SDSS_UGRIZ_ABSMAG_Z01'][:, 2]
size, col = _markers()
fig, ax = plt.subplots(figsize=(6, 4))
ax.scatter(Mr, gr, s=30, c='lightgray', edgecolor='k')
ax.scatter(Mr[iarch], gr[iarch], c=col, marker='o', s=size,
alpha=1, edgecolor='k')
ax.set_xlabel(r'$M_{0.1r}$')
ax.set_ylabel(r'$^{0.1}(g - r)$')
ax.set_xlim(-16, -23)
ax.set_ylim(0, 1.3)
ax.grid(True)
qa_ages_colormag()
"""
Explanation: Generate some QAplots.
End of explanation
"""
|
tanmay987/deepLearning | image-classification/dlnd_image_classification.ipynb | mit | """
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
# Use Floyd's cifar-10 dataset if present
floyd_cifar10_location = '/input/cifar-10/python.tar.gz'
if isfile(floyd_cifar10_location):
tar_gz_path = floyd_cifar10_location
else:
tar_gz_path = 'cifar-10-python.tar.gz'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(tar_gz_path):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
tar_gz_path,
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open(tar_gz_path) as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
"""
Explanation: Image Classification
In this project, you'll classify images from the CIFAR-10 dataset. The dataset consists of airplanes, dogs, cats, and other objects. You'll preprocess the images, then train a convolutional neural network on all the samples. The images need to be normalized and the labels need to be one-hot encoded. You'll get to apply what you learned and build a convolutional, max pooling, dropout, and fully connected layers. At the end, you'll get to see your neural network's predictions on the sample images.
Get the Data
Run the following cell to download the CIFAR-10 dataset for python.
End of explanation
"""
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 2
sample_id = 6
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
"""
Explanation: Explore the Data
The dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named data_batch_1, data_batch_2, etc.. Each batch contains the labels and images that are one of the following:
* airplane
* automobile
* bird
* cat
* deer
* dog
* frog
* horse
* ship
* truck
Understanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the batch_id and sample_id. The batch_id is the id for a batch (1-5). The sample_id is the id for a image and label pair in the batch.
Ask yourself "What are all possible labels?", "What is the range of values for the image data?", "Are the labels in order or random?". Answers to questions like these will help you preprocess the data and end up with better predictions.
End of explanation
"""
def normalize(x):
"""
Normalize a list of sample image data in the range of 0 to 1
: x: List of image data. The image shape is (32, 32, 3)
: return: Numpy array of normalize data
"""
# TODO: Implement Function
return x/255
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_normalize(normalize)
"""
Explanation: Implement Preprocess Functions
Normalize
In the cell below, implement the normalize function to take in image data, x, and return it as a normalized Numpy array. The values should be in the range of 0 to 1, inclusive. The return object should be the same shape as x.
End of explanation
"""
def one_hot_encode(x):
"""
One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
: x: List of sample Labels
: return: Numpy array of one-hot encoded labels
"""
for i,label in enumerate(x):
A=np.zeros(10)
A[label]=1
if(i==0):
oneHotEncode=A
else:
oneHotEncode=np.vstack([oneHotEncode,A])
return oneHotEncode
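# Editor's note (sketch, not part of the submitted project): a vectorized equivalent of the
# loop above, assuming the labels are integers in 0-9. It also returns a 2-D array when x
# holds a single label, which the vstack-based version does not.
def one_hot_encode_vectorized(x):
    return np.eye(10)[np.asarray(x)]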
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_one_hot_encode(one_hot_encode)
"""
Explanation: One-hot encode
Just like the previous code cell, you'll be implementing a function for preprocessing. This time, you'll implement the one_hot_encode function. The input, x, is a list of labels. Implement the function to return the list of labels as a one-hot encoded Numpy array. The possible values for labels are 0 to 9. The one-hot encoding function should return the same encoding for each value between each call to one_hot_encode. Make sure to save the map of encodings outside the function.
Hint: Don't reinvent the wheel.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
"""
Explanation: Randomize Data
As you saw from exploring the data above, the order of the samples are randomized. It doesn't hurt to randomize it again, but you don't need to for this dataset.
Preprocess all the data and save it
Running the code cell below will preprocess all the CIFAR-10 data and save it to file. The code below also uses 10% of the training data for validation.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
valid_labels.shape
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
import tensorflow as tf
def neural_net_image_input(image_shape):
"""
Return a Tensor for a batch of image input
: image_shape: Shape of the images
: return: Tensor for image input.
"""
# TODO: Implement Function
return tf.placeholder(tf.float32,shape=(None,*image_shape),name='x')
def neural_net_label_input(n_classes):
"""
Return a Tensor for a batch of label input
: n_classes: Number of classes
: return: Tensor for label input.
"""
# TODO: Implement Function
return tf.placeholder(tf.float32,shape=(None,n_classes),name='y')
def neural_net_keep_prob_input():
"""
Return a Tensor for keep probability
: return: Tensor for keep probability.
"""
# TODO: Implement Function
return tf.placeholder(tf.float32,name='keep_prob')
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
"""
Explanation: Build the network
For the neural network, you'll build each layer into a function. Most of the code you've seen has been outside of functions. To test your code more thoroughly, we require that you put each layer in a function. This allows us to give you better feedback and test for simple mistakes using our unittests before you submit your project.
Note: If you're finding it hard to dedicate enough time for this course each week, we've provided a small shortcut to this part of the project. In the next couple of problems, you'll have the option to use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages to build each layer, except the layers you build in the "Convolutional and Max Pooling Layer" section. TF Layers is similar to Keras's and TFLearn's abstraction to layers, so it's easy to pickup.
However, if you would like to get the most out of this course, try to solve all the problems without using anything from the TF Layers packages. You can still use classes from other packages that happen to have the same name as ones you find in TF Layers! For example, instead of using the TF Layers version of the conv2d class, tf.layers.conv2d, you would want to use the TF Neural Network version of conv2d, tf.nn.conv2d.
Let's begin!
Input
The neural network needs to read the image data, one-hot encoded labels, and dropout keep probability. Implement the following functions
* Implement neural_net_image_input
* Return a TF Placeholder
* Set the shape using image_shape with batch size set to None.
* Name the TensorFlow placeholder "x" using the TensorFlow name parameter in the TF Placeholder.
* Implement neural_net_label_input
* Return a TF Placeholder
* Set the shape using n_classes with batch size set to None.
* Name the TensorFlow placeholder "y" using the TensorFlow name parameter in the TF Placeholder.
* Implement neural_net_keep_prob_input
* Return a TF Placeholder for dropout keep probability.
* Name the TensorFlow placeholder "keep_prob" using the TensorFlow name parameter in the TF Placeholder.
These names will be used at the end of the project to load your saved model.
Note: None for shapes in TensorFlow allow for a dynamic size.
End of explanation
"""
MEAN_INIT = 0.001
STDDEV_INIT = 0.05
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
"""
Apply convolution then max pooling to x_tensor
:param x_tensor: TensorFlow Tensor
:param conv_num_outputs: Number of outputs for the convolutional layer
:param conv_ksize: kernal size 2-D Tuple for the convolutional layer
:param conv_strides: Stride 2-D Tuple for convolution
:param pool_ksize: kernal size 2-D Tuple for pool
:param pool_strides: Stride 2-D Tuple for pool
: return: A tensor that represents convolution and max pooling of x_tensor
"""
weights=tf.Variable(tf.truncated_normal(mean=MEAN_INIT, stddev=STDDEV_INIT, shape=[*conv_ksize,int(x_tensor.shape[3]),conv_num_outputs]))
#bias=tf.Variable(tf.zeros(conv_num_outputs))
bias = tf.Variable(tf.truncated_normal(mean=MEAN_INIT, stddev=STDDEV_INIT, shape=[conv_num_outputs]))
x= tf.nn.conv2d(x_tensor,weights,[1,*conv_strides,1],padding='SAME')
x=tf.nn.bias_add(x,bias)
x=tf.nn.relu(x)
x=tf.nn.max_pool(x,[1,*pool_ksize,1],[1,*pool_strides,1], padding='SAME')
# TODO: Implement Function
return x
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_con_pool(conv2d_maxpool)
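# Editor's note (a quick shape sanity check, not part of the original project): with SAME
# padding, a (2, 2) pool kernel and (2, 2) pool stride halve each spatial dimension, so a
# (None, 32, 32, 3) input should come out as (None, 16, 16, 10) here.
with tf.Graph().as_default():
    x_check = tf.placeholder(tf.float32, (None, 32, 32, 3))
    out_check = conv2d_maxpool(x_check, 10, (3, 3), (1, 1), (2, 2), (2, 2))
    print(out_check.get_shape().as_list())  # expect [None, 16, 16, 10]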
"""
Explanation: Convolution and Max Pooling Layer
Convolution layers have a lot of success with images. For this code cell, you should implement the function conv2d_maxpool to apply convolution then max pooling:
* Create the weight and bias using conv_ksize, conv_num_outputs and the shape of x_tensor.
* Apply a convolution to x_tensor using weight and conv_strides.
* We recommend you use same padding, but you're welcome to use any padding.
* Add bias
* Add a nonlinear activation to the convolution.
* Apply Max Pooling using pool_ksize and pool_strides.
* We recommend you use same padding, but you're welcome to use any padding.
Note: You can't use TensorFlow Layers or TensorFlow Layers (contrib) for this layer, but you can still use TensorFlow's Neural Network package. You may still use the shortcut option for all the other layers.
End of explanation
"""
def flatten(x_tensor):
"""
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
"""
image_size=int(x_tensor.shape[1]*x_tensor.shape[2]*x_tensor.shape[3])
return tf.reshape(x_tensor,[-1,image_size])
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_flatten(flatten)
"""
Explanation: Flatten Layer
Implement the flatten function to change the dimension of x_tensor from a 4-D tensor to a 2-D tensor. The output should be the shape (Batch Size, Flattened Image Size). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages.
End of explanation
"""
def fully_conn(x_tensor, num_outputs):
"""
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
"""
# TODO: Implement Function
W=tf.Variable(tf.truncated_normal(mean=MEAN_INIT, stddev=STDDEV_INIT, shape=[x_tensor.shape[1].value, num_outputs]))
bias=tf.Variable(tf.truncated_normal(mean=MEAN_INIT, stddev=STDDEV_INIT, shape=[num_outputs]))
x=tf.add(tf.matmul(x_tensor,W),bias)
return tf.nn.relu(x)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_fully_conn(fully_conn)
"""
Explanation: Fully-Connected Layer
Implement the fully_conn function to apply a fully connected layer to x_tensor with the shape (Batch Size, num_outputs). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages.
End of explanation
"""
def output(x_tensor, num_outputs):
"""
Apply a output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
"""
# TODO: Implement Function
W=tf.Variable(tf.truncated_normal([x_tensor.shape[1].value,num_outputs]))
bias=tf.Variable(tf.zeros(num_outputs))
x=tf.add(tf.matmul(x_tensor,W),bias)
return x
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_output(output)
"""
Explanation: Output Layer
Implement the output function to apply a fully connected layer to x_tensor with the shape (Batch Size, num_outputs). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages.
Note: Activation, softmax, or cross entropy should not be applied to this.
End of explanation
"""
def conv_net(x, keep_prob):
"""
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that hold dropout keep probability.
: return: Tensor that represents logits
"""
conv_num_outputs=[]
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
x = conv2d_maxpool(x, 16, (5, 5), (1, 1), (2, 2), (2, 2))
x = conv2d_maxpool(x, 32, (5, 5), (1, 1), (2, 2), (2, 2))
x = conv2d_maxpool(x, 64, (5, 5), (1, 1), (2, 2), (2, 2))
# TODO: Apply a Flatten Layer
# Function Definition from Above:
x=flatten(x)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
x=fully_conn(x, num_outputs=786)
x=fully_conn(x, num_outputs=786)
x = tf.nn.dropout(x, keep_prob)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
x=output(x, num_outputs=10)
# TODO: return output
return x
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
"""
Explanation: Create Convolutional Model
Implement the function conv_net to create a convolutional neural network model. The function takes in a batch of images, x, and outputs logits. Use the layers you created above to create this model:
Apply 1, 2, or 3 Convolution and Max Pool layers
Apply a Flatten Layer
Apply 1, 2, or 3 Fully Connected Layers
Apply an Output Layer
Return the output
Apply TensorFlow's Dropout to one or more layers in the model using keep_prob.
End of explanation
"""
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
"""
Optimize the session on a batch of images and labels
: session: Current TensorFlow session
: optimizer: TensorFlow optimizer function
: keep_probability: keep probability
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
"""
feed_dict = {'keep_prob:0': keep_probability, 'x:0': feature_batch, 'y:0': label_batch}
session.run(optimizer, feed_dict=feed_dict)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_train_nn(train_neural_network)
"""
Explanation: Train the Neural Network
Single Optimization
Implement the function train_neural_network to do a single optimization. The optimization should use optimizer to optimize in session with a feed_dict of the following:
* x for image input
* y for labels
* keep_prob for keep probability for dropout
This function will be called for each batch, so tf.global_variables_initializer() has already been called.
Note: Nothing needs to be returned. This function is only optimizing the neural network.
End of explanation
"""
def print_stats(session, feature_batch, label_batch, cost, accuracy):
"""
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
"""
# Per the spec above, use a keep probability of 1.0 so dropout does not affect the reported stats
feed_cost = {'keep_prob:0': 1.0, 'x:0': feature_batch, 'y:0': label_batch}
cost = session.run(cost, feed_dict=feed_cost)
feed_accuracy = {'keep_prob:0': 1.0, 'x:0': valid_features, 'y:0': valid_labels}
accuracy = session.run(accuracy, feed_dict=feed_accuracy)
print("cost: {}, accuracy: {}".format(cost, accuracy))
"""
Explanation: Show Stats
Implement the function print_stats to print loss and validation accuracy. Use the global variables valid_features and valid_labels to calculate validation accuracy. Use a keep probability of 1.0 to calculate the loss and validation accuracy.
End of explanation
"""
# TODO: Tune Parameters
epochs = 50
batch_size = 128
keep_probability = 0.8
"""
Explanation: Hyperparameters
Tune the following parameters:
* Set epochs to the number of iterations until the network stops learning or start overfitting
* Set batch_size to the highest number that your machine has memory for. Most people set them to common sizes of memory:
* 64
* 128
* 256
* ...
* Set keep_probability to the probability of keeping a node using dropout
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
"""
Explanation: Train on a Single CIFAR-10 Batch
Instead of training the neural network on all the CIFAR-10 batches of data, let's use a single batch. This should save time while you iterate on the model to get a better accuracy. Once the final validation accuracy is 50% or greater, run the model on all the data in the next section.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
"""
Explanation: Fully Train the Model
Now that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
"""
Test the saved model against the test dataset
"""
test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
"""
Explanation: Checkpoint
The model has been saved to disk.
Test Model
Test your model against the test dataset. This will be your final accuracy. You should have an accuracy greater than 50%. If you don't, keep tweaking the model architecture and parameters.
End of explanation
"""
|
hpparvi/PyTransit | notebooks/example_eclipse_model.ipynb | gpl-2.0 | %pylab inline
sys.path.append('..')
from pytransit import EclipseModel
seed(0)
times_sc = linspace(0.5, 2.5, 5000) # Short cadence time stamps
times_lc = linspace(0.5, 2.5, 500) # Long cadence time stamps
k, t0, p, a, i, e, w = 0.1, 1., 2.0, 4.2, 0.5*pi, 0.25, 0.4*pi
ns = 50
ks = normal(k, 0.01, ns)
t0s = normal(t0, 1e-5, ns)
ps = normal(p, 1e-5, ns)
aas = normal(a, 0.01, ns)
iis = normal(i, 1e-5, ns)
es = uniform(0, 0.3, ns)
ws = uniform(0, 2*pi, ns)
frs = normal(0.01, 1e-5, ns)
"""
Explanation: Eclipse model
The eclipse model, pytransit.EclipseModel, can be used to model a secondary eclipse. The model is similar to pytransit.UniformModel, but the eclipse occurs correctly where it should based on the orbital eccentricity and argument of periastron, and the model takes the planet-star flux ratio as an additional free parameter. The model is parallelised using numba, and the number of threads can be set using the NUMBA_NUM_THREADS environment variable.
End of explanation
"""
tm = EclipseModel()
"""
Explanation: Model initialization
The eclipse model doesn't take any special initialization arguments, so the initialization is straightforward.
End of explanation
"""
tm.set_data(times_sc)
"""
Explanation: Data setup
Homogeneous time series
The model needs to be set up by calling set_data() before it can be used. At its simplest, set_data takes the mid-exposure times of the time series to be modelled.
End of explanation
"""
def plot_transits(tm, fmt='k', tc_label=True):
fig, axs = subplots(1, 2, figsize = (13,3), constrained_layout=True, sharey=True)
flux = tm.evaluate_ps(k, t0, p, a, i, e, w)
axs[0].plot(tm.time, flux, fmt)
axs[0].set_title('Individual parameters')
flux = tm.evaluate(ks, t0s, ps, aas, iis, es, ws)
axs[1].plot(tm.time, flux.T, fmt, alpha=0.2)
axs[1].set_title('Parameter vector')
if tc_label:
for ax in axs:
ax.axvline(t0, c='k', ls='--')
ax.text(t0-0.01, 0.999, 'Transit centre', rotation=90, va='top', ha='right')
setp(axs[0], ylabel='Normalised flux')
setp(axs, xlabel='Time [days]', xlim=tm.time[[0,-1]])
tm.set_data(times_sc)
plot_transits(tm)
"""
Explanation: Model use
Evaluation
The transit model can be evaluated using either a set of scalar parameters, a parameter vector (1D ndarray), or a parameter vector array (2D ndarray). The model flux is returned as a 1D ndarray in the first two cases, and a 2D ndarray in the last (one model per parameter vector).
tm.evaluate_ps(k, t0, p, a, i, e=0, w=0) evaluates the model for a set of scalar parameters, where k is the radius ratio, t0 the zero epoch, p the orbital period, a the semi-major axis divided by the stellar radius, i the inclination in radians, e the eccentricity, and w the argument of periastron. Eccentricity and argument of periastron are optional, and omitting them defaults to a circular orbit.
tm.evaluate_pv(pv) evaluates the model for a 1D parameter vector, or 2D array of parameter vectors. In the first case, the parameter vector should be array-like with elements [k, t0, p, a, i, e, w]. In the second case, the parameter vectors should be stored in a 2d ndarray with shape (npv, 7) as
[[k1, t01, p1, a1, i1, e1, w1],
[k2, t02, p2, a2, i2, e2, w2],
...
[kn, t0n, pn, an, in, en, wn]]
The reason for the different options is that the model implementations may have optimisations that make the model evaluation for a set of parameter vectors much faster than if computing them separately. This is especially the case for the OpenCL models.
End of explanation
"""
tm.set_data(times_lc, nsamples=10, exptimes=0.01)
plot_transits(tm)
"""
Explanation: Supersampling
The eclipse model can be supersampled by setting the nsamples and exptimes arguments in set_data.
End of explanation
"""
times_1 = linspace(1.5, 2.0, 500)
times_2 = linspace(2.0, 2.5, 10)
times = concatenate([times_1, times_2])
lcids = concatenate([full(times_1.size, 0, 'int'), full(times_2.size, 1, 'int')])
nsamples = [1, 10]
exptimes = [0, 0.0167]
tm.set_data(times, lcids, nsamples=nsamples, exptimes=exptimes)
plot_transits(tm, 'k.-', tc_label=False)
"""
Explanation: Heterogeneous time series
PyTransit allows for heterogeneous time series, that is, a single time series can contain several individual light curves (with, e.g., different time cadences and required supersampling rates) observed (possibly) in different passbands.
If a time series contains several light curves, it also needs the light curve indices for each exposure. These are given through lcids argument, which should be an array of integers. If the time series contains light curves observed in different passbands, the passband indices need to be given through pbids argument as an integer array, one per light curve. Supersampling can also be defined on per-light curve basis by giving the nsamplesand exptimes as arrays with one value per light curve.
For example, a set of three light curves, two observed in one passband and the third in another passband
times_1 (lc = 0, pb = 0, sc) = [1, 2, 3, 4]
times_2 (lc = 1, pb = 0, lc) = [3, 4]
times_3 (lc = 2, pb = 1, sc) = [1, 5, 6]
Would be set up as
tm.set_data(time = [1, 2, 3, 4, 3, 4, 1, 5, 6],
lcids = [0, 0, 0, 0, 1, 1, 2, 2, 2],
pbids = [0, 0, 1],
nsamples = [ 1, 10, 1],
exptimes = [0.1, 1.0, 0.1])
Example: two light curves with different cadences
End of explanation
"""
|
Chemcy/vnpy | vn.tutorial/performance/Performance of Receiving Tick Data.ipynb | mit | from datetime import datetime, time
import time as gtime
import pymongo
from dateutil.parser import parse
"""
Explanation: Performance testing and optimisation of tick-data reception in vnpy
by Jerry He, 2016.12,
Discussion: https://zhuanlan.zhihu.com/p/24662087
Recently the quantitative trading platform vnpy, being open source, powerful, easy to develop with and highly customisable, has become widely used in quantitative trading.
Persisting market data is a basic problem every quantitative trading platform has to solve. It serves two purposes: providing data for analysis and backtesting during strategy development, and supplying recent historical data to live-trading programs. The former can be handled by traditional, more efficient implementations (for example, we have a C++/leveldb based program for receiving, forwarding and retrieving market data) or by buying the data from a vendor; for the latter, persisting recent data directly through vnpy is the simpler option.
vnpy ships with the dataRecorder module, which already implements saving of tick data and minute bars.
This work covers:
- a performance review of vnpy's original data-recording function
- corrections and optimisations of that function for the CTP interface
All timing results below are in milliseconds.
Tests were run on Windows 7, i7 3.4GHz.
End of explanation
"""
TICK_DB_NAME='Test'
EMPTY_STRING = ''
EMPTY_UNICODE = u''
EMPTY_INT = 0
EMPTY_FLOAT = 0.0
class DrTickData(object):
"""Tick数据"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.vtSymbol = EMPTY_STRING # vt系统代码
self.symbol = EMPTY_STRING # 合约代码
self.exchange = EMPTY_STRING # 交易所代码
# 成交数据
self.lastPrice = EMPTY_FLOAT # 最新成交价
self.volume = EMPTY_INT # 最新成交量
self.openInterest = EMPTY_INT # 持仓量
self.upperLimit = EMPTY_FLOAT # 涨停价
self.lowerLimit = EMPTY_FLOAT # 跌停价
# tick的时间
self.date = EMPTY_STRING # 日期
self.time = EMPTY_STRING # 时间
self.datetime = None # python的datetime时间对象
# 五档行情
self.bidPrice1 = EMPTY_FLOAT
self.bidPrice2 = EMPTY_FLOAT
self.bidPrice3 = EMPTY_FLOAT
self.bidPrice4 = EMPTY_FLOAT
self.bidPrice5 = EMPTY_FLOAT
self.askPrice1 = EMPTY_FLOAT
self.askPrice2 = EMPTY_FLOAT
self.askPrice3 = EMPTY_FLOAT
self.askPrice4 = EMPTY_FLOAT
self.askPrice5 = EMPTY_FLOAT
self.bidVolume1 = EMPTY_INT
self.bidVolume2 = EMPTY_INT
self.bidVolume3 = EMPTY_INT
self.bidVolume4 = EMPTY_INT
self.bidVolume5 = EMPTY_INT
self.askVolume1 = EMPTY_INT
self.askVolume2 = EMPTY_INT
self.askVolume3 = EMPTY_INT
self.askVolume4 = EMPTY_INT
self.askVolume5 = EMPTY_INT
def insertData(db,collection,data):
client[db][collection].insert(data.__dict__)
def procecssTickEvent(tick, insertDB=False):
"""处理行情推送"""
vtSymbol = tick.vtSymbol
# 转化Tick格式
drTick = DrTickData()
d = drTick.__dict__
for key in d.keys():
if key != 'datetime':
d[key] = tick.__dict__[key]
drTick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f')
# 更新Tick数据
if insertDB:
insertData(TICK_DB_NAME, vtSymbol, drTick)
"""
Explanation: Refactor vnpy's tick-data handling code so it can be benchmarked
End of explanation
"""
client=pymongo.MongoClient()
data=client['VnTrader_Tick_Db']['rb1705'].find_one({})
del data['_id']
class InputTick: pass
tick=InputTick()
tick.__dict__.update(data)
print tick.__dict__
"""
Explanation: Create a tick sample to use for the tests
End of explanation
"""
def profiling(count,func=None):
if func==None: func=lambda: procecssTickEvent(tick)
t0=gtime.time()
for i in range(count):
func()
total_time=(gtime.time()-t0)
return total_time*1000/count
test_count=10000
original_nodb=profiling(test_count)
client.drop_database(TICK_DB_NAME)
original_db=profiling(test_count,func=lambda: procecssTickEvent(tick,insertDB=True))
print 'Original version, no MongoDB write, time per call: %.4f' %original_nodb
print 'Original version, with MongoDB write, time per call: %.4f' %original_db
"""
Explanation: Benchmark the original function
End of explanation
"""
#过滤掉的时间区间,注意集合竞价tick被过滤了。
invalid_sections=[(time(2,30,59),time(9,0,0)),
(time(11,30,59),time(13,0,0)),
(time(15,15,0),time(21,0,0))]
#本地时间在此区间时对收到的Tick数据不处理,避免有时期货公司会抽风把数据重推一次。
invalid_local_section=(time(5,0,0),time(8,30,0))
def procecssTickEvent(tick, insertDB=False):
"""处理行情推送"""
# 1. 本地时间检查
local_datetime=datetime.now()
local_time=local_datetime.time()
if local_time>invalid_local_section[0] and local_time<invalid_local_section[1]:
return
# 2. 转化Tick格式
drTick = DrTickData()
d = drTick.__dict__
for key in d.keys():
if key != 'datetime':
d[key] = tick.__dict__[key]
#防御时间格式变为 ”9:00:00.5"
if tick.time[2] != ':':
tick.time = '0' + tick.time
tick_hour = int(tick.time[0:2])
local_hour = local_time.hour
real_date=local_datetime
if tick_hour == 23 and local_hour == 0:#行情时间慢于系统时间
real_date+=timedelta(-1)
elif tick_hour == 0 and local_hour == 23:#系统时间慢于行情时间
real_date+=timedelta(1)
tick.time = tick.time.ljust(12,'0')
drTick.datetime = datetime(real_date.year,real_date.month,real_date.day,
int(tick.time[0:2]), int(tick.time[3:5]), int(tick.time[6:8]), int(tick.time[9:12])*1000)
tmpTime=drTick.datetime.time()
for sec in invalid_sections:
if tmpTime>sec[0] and tmpTime<sec[1]:
return
# 3. 更新Tick数据
if insertDB:
insertData(TICK_DB_NAME, tick.vtSymbol, drTick)
procecssTickEvent(tick)
new_nodb=profiling(test_count)
client.drop_database(TICK_DB_NAME)
new_db=profiling(test_count,func=lambda: procecssTickEvent(tick,insertDB=True))
print 'New version, no MongoDB write, time per call: %.4f' %new_nodb
print 'New version, with MongoDB write, time per call: %.4f' %new_db
"""
Explanation: Improved version
When saving futures data through the CTP interface, the original code has a few problems:
- spurious ticks received outside trading hours are not filtered out
- the date field supplied by the exchanges is inconsistent (some give the real calendar date, others the trading day), so the computed datetime field is unreliable as well
An improved version addressing these issues follows:
End of explanation
"""
def insertData(db,collection,data):
for key in data.__dict__:
fout.write(str(data.__dict__[key])+',')
fout=open('D:/test.txt','w')
new_db_text=profiling(test_count,func=lambda: procecssTickEvent(tick,insertDB=True))
print 'New version, writing to a text file, time per call: %.4f' %new_db_text
fout.close()
"""
Explanation: Performance of saving to a text file
End of explanation
"""
time_convert1=profiling(10000,lambda:parse('20161212 21:21:21.5'))
time_convert2=profiling(10000,lambda:datetime.strptime('20161212 21:21:21.5', '%Y%m%d %H:%M:%S.%f'))
def customized_parse(s):
s=s.ljust(21,'0')
return datetime(int(s[0:4]),int(s[4:6]),int(s[6:8]),int(s[9:11]), int(s[12:14]), int(s[15:17]), int(s[18:21])*1000 )
time_convert3=profiling(10000,lambda:customized_parse('20161212 21:21:21.5'))
print 'Conversion method 1 time: %.4f' %time_convert1
print 'Conversion method 2 time: %.4f' %time_convert2
print 'Conversion method 3 time: %.4f' %time_convert3
"""
Explanation: Performance of the datetime conversion
Note that in the no-database-write case the new version is actually faster than the original; this is mainly due to rewriting the time-conversion code.
The three conversion methods below differ hugely in speed:
End of explanation
"""
import pandas as pd
df=pd.DataFrame([{u'no write':original_nodb,u'mongodb write':original_db},
{u'no write': new_nodb, u'mongodb write': new_db, u'text file write':new_db_text}
],index=['original','new'])
df
"""
Explanation: Summary
End of explanation
"""
|
CAChemE/curso-python-datos | notebooks/011-NumPy-CaracteristicasArrays.ipynb | bsd-3-clause | import numpy as np
lista = [ 1, 1+2j, True, 'aerodinamica', [1, 2, 3] ]
lista
"""
Explanation: Characteristics of NumPy arrays
In this notebook we will look at the main characteristics of NumPy arrays and how they improve the efficiency of our code.
The array object provided by NumPy (Python already has an array type that stores elements of a single type, but it lacks the mathematical machinery needed to perform operations quickly and efficiently) is characterised by:
1) Type homogeneity:
Let's start by seeing what happens with lists:
End of explanation
"""
array = np.array([ 1, 1+2j, True, 'aerodinamica'])
array
"""
Explanation: In the case of arrays:
End of explanation
"""
print(id(lista))
lista.append('fluidos')
print(lista)
print(id(lista))
print(id(array))
array = np.append(array, 'fluidos')
print(array)
print(id(array))
"""
Explanation: All good? Not quite. While each element of the list keeps its own type, in the array every element must have the same type, and NumPy has decided they are all going to be strings.
2) Fixed size at creation time:
Don't worry, the allocations are automatic...
As before, let's start with the list:
End of explanation
"""
lista = list(range(0,100000))
type(lista)
%%timeit
sum(lista)
array = np.arange(0, 100000)
%%timeit
np.sum(array)
"""
Explanation: If we check the help of np.append by running help(np.append) in a cell, we can read:
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that `append` does not occur in-place: a new array is allocated and filled. If `axis` is None, `out` is a flattened array.
3) Efficiency
So far arrays have proven rather less flexible than lists, so should we forget the last 10 minutes and always use lists? No! Arrays manage memory much more efficiently, which improves performance.
Let's now look at execution speed using the %%timeit magic, which, placed at the top of a cell, reports how long the cell takes to run.
End of explanation
"""
def my_linspace_FORTRAN(start, stop, number=50):
x = np.empty(number)
step = (stop - start) / (number - 1)
for ii in range(number):
x[ii] = ii * step
x += start
return x
def my_linspace_PYTHONIC(start, stop, number=50):
step = (stop - start) / (number - 1)
x = np.array([ii * step for ii in range(number)]) #esto es una list comprehension
x += start
return x
%%timeit
np.linspace(0,100,1000000)
%%timeit
my_linspace_FORTRAN(0,100,1000000)
%%timeit
my_linspace_PYTHONIC(0,100,1000000)
"""
Explanation: As you can see, the improvement here is about two orders of magnitude. NumPy gives us functions that run at practically the speed of optimised compiled code (Fortran, C, C++) while requiring much less code and offering a higher level of abstraction. Knowing a few good practices, we can compete in speed with our own Python code. Where that is not possible, tools such as f2py let us call code written in other languages from Python. This topic may be a bit advanced at this point, but it is quite useful; see the pybonacci article on it if you need to.
Exercise
To review the first lessons we will implement our own linspace function using a loop (FORTRAN style) and using a list comprehension (Pythonic style), and then compare their performance against NumPy's.
End of explanation
"""
|
sytays/openanalysis | doc/Langauge/13 - Introduction to Object Oriented Programming in Python.ipynb | gpl-3.0 | class Student:
count = 0 # Total number of objects created so far, it is static variable as it is declared outside
def __init__(self,name,usn,marks):
"""
Constructor of class Student
Input: name - name of the student : string
usn - university serial number : string
marks - marks in 3 subjects out of 20
"""
Student.count += 1
self.name = name
self.usn = usn
self.marks = marks[:] # Copy marks to self.marks .. a simple self.marks = marks make only reference equal
def print_details(self):
print(str(self))
def total_marks(self):
return sum(self.marks)
def __iter__(self):
details = {'name':self.name,'usn':self.usn,'marks':self.marks}
for k,v in details.items():
yield k,v # A tuple
def __str__(self):
return "Name : {0} \nUSN = {1}\nMarks in 3 subjects = {2}".format(self.name,self.usn,self.marks)
@staticmethod
def get_total_count():
return Student.count
s1 = Student('Ramesh','4jc11cs111',[20,16,18])
s2 = Student('Ravi','4jc15cs112',[15,18,18])
print(s1) # calls __str__()
print(s2)
Student.count
Student.get_total_count()
for k,v in s1:
print('{} = {}'.format(k,v))
s1.print_details() # self of Student.print_details(self) is passed as s1
Student.print_details(s1) # Explicitly passing self parameter
Student.get_total_count()
s1.get_total_count() # This is also possible, @staticmethod attribute prevents passing object to method
"""
Explanation: Object Oriented Programming
In Object Oriented Programming, everything is an object. Objects are real world entities having some attributes and some related methods that operate on attributes. We assume that the reader has some familiarity with Object Oriented Concepts such as Inheritance, Polymorphism, Abstraction and so on ...
Defining Classes
Syntax:
python
class ClassName:
<statement 1>
<statement 2>
....
....
<statement n>
Special Methods inside the class
Unlike C++ and Java, Python methods do not implicitly hold a reference to the current object (a this pointer). Methods should take the object as their first argument; this is not required for static methods. At the point of invocation of an object method, the object is passed to the method implicitly. It is a convention to name the first parameter of a method self. Now let's see some special functions of classes.
__init__(self, elements) : Constructor, called when the object is created. All properties of the object have to be declared here.
__del__(self) : Destructor, called when del is applied to an object.
__str__(self) : Returns the string representation of the object. Called when str() is called on the object.
__iter__(self) : Returns an iterator over the elements of the object. Called when iter() is called on the object. This also enables the for ele in object construct.
__len__(self) : Returns the length of the collection. Called when len() is invoked on the object.
__getitem__(self, item) : Allows us to use the object[item] accessor to get an item.
Static members and methods
Any member declared inside the class, but outside the methods, is shared by all instances of the class. A method annotated with @staticmethod is a static method, and doesn't receive the object as its first parameter.
A note on private members
A member or method whose name starts with '__' is regarded as a private member or method.
A sample class, Student
Here we implement a simple Student class.
End of explanation
"""
class Duck:
def quack(self):
print("Quaaaaaack!")
def feathers(self):
print("The duck has white and gray feathers.")
class Person:
def quack(self):
print("The person imitates a duck.")
def feathers(self):
print("The person takes a feather from the ground and shows it.")
def name(self):
print("John Smith")
def in_the_forest(duck):
duck.quack()
duck.feathers()
def game():
donald = Duck()
john = Person()
in_the_forest(donald)
in_the_forest(john)
game()
"""
Explanation: Duck Typing and Interfaces
In C, C++, Java and C#, we have to predefine the data type of every variable declared. In Python, you may have observed that you are not defining any data type during variable declaration. In fact, Python does not require you to do that.
In C,
c
int x;
means the storage space allocated to x is fixed (typically 4 bytes on an x64 system) and will never change. This also implies that x will never hold values other than int; trying to do so raises a compiler error. This makes C a statically typed language, i.e., the data type of a variable is determined at compile time.
On the other hand, in Python the type of a variable is determined entirely at runtime, and the storage allocated to a variable can vary dynamically. When we assign a string to a variable x, it will be a str; if we reassign it to a list, it will be a list. This makes Python a dynamically typed language, which is closely related to duck typing.
Duck typing is an application of the duck test in type safety. It requires that type checking be deferred to runtime, and is implemented by means of dynamic typing or reflection.
The Duck test is a humorous term for a form of abductive reasoning. This is its usual expression:
If it looks like a duck, swims like a duck, and quacks like a duck, then it probably is a duck.
The duck test can be seen in the following example. As far as the function in_the_forest is concerned, the Person object is a duck:
End of explanation
"""
x = 8
type(x)
type(8.5)
type('hello')
type([1,2,1])
type({})
type((1,))
type(s1)
import random
type(random)
"""
Explanation: type() - Obtaining the data type of a variable
End of explanation
"""
|
CORE-GATECH-GROUP/serpent-tools | examples/DepletionMatrix.ipynb | mit | %matplotlib inline
import os
mtxFile = os.path.join(
os.environ["SERPENT_TOOLS_DATA"],
"depmtx_ref.m")
"""
Explanation: Copyright (c) 2017-2020 Serpent-Tools developer team, GTRC
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Data files are not included with the python package, but can be downloaded from the GitHub repository. For this tutorial, the files are placed in the directory identified with the SERPENT_TOOLS_DATA environment variable.
End of explanation
"""
import serpentTools
reader = serpentTools.read(mtxFile)
reader
"""
Explanation: Depletion Matrix
The serpentTools package supports reading depletion matrix files, generated when set depmtx 1 is added to the input file.
As of SERPENT 2.1.30, these files contain
1. The length of time for a depletion interval
2. Vector of initial concentrations for all isotopes present in the depletion problem
3. ZAI vector
4. Depletion matrix
5. Vector of final concentrations following one depletion event.
Files such as this are present for each burnable material tracked by SERPENT and at each time step in the problem.
This document will demonstrate the DepmtxReader, designed to store these arrays.
NOTE The depletion matrices can be very large for many problems, ~1000 x 1000 elements. To this end, the DepmtxReader can store the matrices in Compressed Sparse Column csc_matrix form or as full numpy arrays. The reader will use the sparse format if scipy is installed, unless explicitly told not to.
End of explanation
"""
reader.n0
"""
Explanation: We have access to all the data present in the file directly on the reader.
End of explanation
"""
reader.zai
"""
Explanation: This input file did not include fission yield libraries for depletion in order to reduce the size of the depletion matrix from ~1000 x 1000 to 74 x 74.
Number densities and quantities in the depletion matrix are stored as longfloat types, as they contain many significant digits in the output files.
End of explanation
"""
reader.sparse
reader.depmtx
"""
Explanation: One can easily check if the depletion matrix is sparse by using the sparse attribute on the reader
End of explanation
"""
reader.plotDensity()
"""
Explanation: A simple plot method can be used to plot initial concentrations, final concentrations, or both.
End of explanation
"""
reader.plotDensity(
what='n0', # plot initial value
markers='>', # marker for scatter plot
labels='$N_0$', # labels for each entry plotted
ylim=1E-30, # set the lower y-axis limit
)
"""
Explanation: Some options can be passed to improve the look and feel of the plot
End of explanation
"""
|
DallasTrinkle/Onsager | examples/Fe-C.ipynb | mit | import sys
sys.path.extend(['../'])
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
%matplotlib inline
import onsager.crystal as crystal
import onsager.OnsagerCalc as onsager
from scipy.constants import physical_constants
kB = physical_constants['Boltzmann constant in eV/K'][0]
"""
Explanation: Fe-C diffusion and elastodiffusivity
Taking data from R.G.A. Veiga, M. Perez, C. Becquart, E. Clouet and C. Domain, Acta mater. 59 (2011) p. 6963 doi:10.1016/j.actamat.2011.07.048
Fe in the body-centered cubic phase, $a_0 = 0.28553\text{ nm}$; C sit at octahedral sites, where the transition states between octahedral sites are represented by tetrahedral sites. The data is obtained from an EAM potential, where $C_{11} = 243\text{ GPa}$, $C_{12}=145\text{ GPa}$, and $C_{44} = 116\text{ GPa}$. The tetrahedral transition state is 0.816 eV above the octahedral site, and the attempt frequency is taken as 10 THz ($10^{13}\text{ Hz}$).
The dipole tensors can be separated into parallel and perpendicular components; the parallel direction points towards the closest Fe atoms for the C, while the perpendicular components lie in the interstitial plane. For the octahedral, the parallel component is 8.03 eV, and the perpendicular is 3.40 eV; for the tetrahedral transition state, the parallel component is 4.87 eV, and the perpendicular is 6.66 eV.
End of explanation
"""
a0 = 0.28553
Fe = crystal.Crystal.BCC(a0, "Fe")
print(Fe)
"""
Explanation: Create BCC lattice (lattice constant in nm).
End of explanation
"""
stressconv = 1e9*1e-27*Fe.volume/physical_constants['electron volt'][0]
c11, c12, c44 = 243*stressconv, 145*stressconv, 116*stressconv
s11, s12, s44 = (c11+c12)/((c11-c12)*(c11+2*c12)), -c12/((c11-c12)*(c11+2*c12)), 1/c44
print('S11 = {:.4f} S12 = {:.4f} S44 = {:.4f}'.format(s11, s12, s44))
stensor = np.zeros((3,3,3,3))
for a in range(3):
for b in range(3):
for c in range(3):
for d in range(3):
if a==b and b==c and c==d: stensor[a,b,c,d] = s11
elif a==b and c==d: stensor[a,b,c,d] = s12
elif (a==d and b==c) or (a==c and b==d): stensor[a,b,c,d] = s44/4
"""
Explanation: Elastic constants converted from GPa ($10^9$ J/m$^3$) to eV/(atomic volume).
End of explanation
"""
uoct = np.dot(Fe.invlatt, np.array([0, 0, 0.5*a0]))
FeC = Fe.addbasis(Fe.Wyckoffpos(uoct), ["C"])
print(FeC)
"""
Explanation: Add carbon interstitial sites at octahedral sites in the lattice. This code (1) gets the set of symmetric Wyckoff positions corresponding to the single site $[00\frac12]$ (first translated into unit cell coordinates), and then adds that new basis to our Fe crystal to generate a new crystal structure, that we name "FeC".
End of explanation
"""
chem = 1 # 1 is the index corresponding to our C atom in the crystal
sitelist = FeC.sitelist(chem)
jumpnetwork = FeC.jumpnetwork(chem, 0.6*a0) # 0.6*a0 is the cutoff distance for finding jumps
FeCdiffuser = onsager.Interstitial(FeC, chem, sitelist, jumpnetwork)
print(FeCdiffuser)
"""
Explanation: Next, we construct a diffuser based on our interstitial. We need to create a sitelist (which will be the Wyckoff positions) and a jumpnetwork for the transitions between the sites. There are tags that correspond to the unique states and transitions in the diffuser.
End of explanation
"""
Dconv = 1e-2
vu0 = 10*Dconv
Etrans = 0.816
dipoledict = {'Poctpara': 8.03, 'Poctperp': 3.40,
'Ptetpara': 4.87, 'Ptetperp': 6.66}
FeCthermodict = {'pre': np.ones(len(sitelist)), 'ene': np.zeros(len(sitelist)),
'preT': vu0*np.ones(len(jumpnetwork)),
'eneT': Etrans*np.ones(len(jumpnetwork))}
# now to construct the site and transition dipole tensors; we use a "direction"--either
# the site position or the jump direction--to determine the parallel and perpendicular
# directions.
for dipname, Pname, direction in zip(('dipole', 'dipoleT'), ('Poct', 'Ptet'),
(np.dot(FeC.lattice, FeC.basis[chem][sitelist[0][0]]),
jumpnetwork[0][0][1])):
# identify the non-zero index in our direction:
paraindex = [n for n in range(3) if not np.isclose(direction[n], 0)][0]
Ppara, Pperp = dipoledict[Pname + 'para'], dipoledict[Pname + 'perp']
FeCthermodict[dipname] = np.diag([Ppara if i==paraindex else Pperp
for i in range(3)])
for k,v in FeCthermodict.items():
print('{}: {}'.format(k, v))
"""
Explanation: Next, we assemble our data: the energies, prefactors, and dipoles for the C atom in Fe, matched to the representative states: these are the first states in the lists, which are also identified by the tags above.
A note about units: If $\nu_0$ is in THz, and $a_0$ is in nm, then $a_0^2\nu_0 = 10^{-2}\text{ cm}^2/\text{s}$. Thus, we multiply by Dconv = $10^{-2}$ so that our diffusivity is output in cm<sup>2</sup>/s.
End of explanation
"""
Trange = np.linspace(300, 1200, 91)
Tlabels = Trange[0::30]
Dlist, dDlist, Vlist = [], [], []
for T in Trange:
beta = 1./(kB*T)
D, dD = FeCdiffuser.elastodiffusion(FeCthermodict['pre'],
beta*FeCthermodict['ene'],
[beta*FeCthermodict['dipole']],
FeCthermodict['preT'],
beta*FeCthermodict['eneT'],
[beta*FeCthermodict['dipoleT']])
Dlist.append(D[0,0])
dDlist.append([dD[0,0,0,0], dD[0,0,1,1], dD[0,1,0,1]])
Vtensor = (kB*T/(D[0,0]))*np.tensordot(dD, stensor, axes=((2,3),(0,1)))
Vlist.append([np.trace(np.trace(Vtensor))/3,
Vtensor[0,0,0,0], Vtensor[0,0,1,1], Vtensor[0,1,0,1]])
D0 = FeCdiffuser.diffusivity(FeCthermodict['pre'],
np.zeros_like(FeCthermodict['ene']),
FeCthermodict['preT'],
np.zeros_like(FeCthermodict['eneT']))
D, dbeta = FeCdiffuser.diffusivity(FeCthermodict['pre'],
FeCthermodict['ene'],
FeCthermodict['preT'],
FeCthermodict['eneT'],
CalcDeriv=True)
print('D0: {:.4e} cm^2/s\nEact: {:.3f} eV'.format(D0[0,0], dbeta[0,0]/D[0,0]))
D, dD = np.array(Dlist), np.array(dDlist)
d11_T = np.vstack((Trange, dD[:,0])).T
d11pos = np.array([(T,d) for T,d in d11_T if d>=0])
d11neg = np.array([(T,d) for T,d in d11_T if d<0])
fig, ax1 = plt.subplots()
ax1.plot(1./Trange, D, 'k', label='$D$')
# ax1.plot(1./Trange, dD[:,0], 'b', label='$d_{11}$')
ax1.plot(1./d11pos[:,0], d11pos[:,1], 'b', label='$d_{11}$')
ax1.plot(1./d11neg[:,0], -d11neg[:,1], 'b--')
ax1.plot(1./Trange, dD[:,1], 'r', label='$d_{12}$')
ax1.plot(1./Trange, dD[:,2], 'g-.', label='$d_{44} = D$')
ax1.set_yscale('log')
ax1.set_ylabel('$D$ [cm$^2$/s]', fontsize='x-large')
ax1.set_xlabel('$T^{-1}$ [K$^{-1}$]', fontsize='x-large')
ax1.legend(bbox_to_anchor=(0.15,0.15,0.2,0.4), ncol=1,
shadow=True, frameon=True, fontsize='x-large')
ax2 = ax1.twiny()
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks([1./t for t in Tlabels])
ax2.set_xticklabels(["{:.0f}K".format(t) for t in Tlabels])
ax2.set_xlabel('$T$ [K]', fontsize='x-large')
ax2.grid(False)
ax2.tick_params(axis='x', top='on', direction='in', length=6)
plt.show()
# plt.savefig('Fe-C-diffusivity.pdf', transparent=True, format='pdf')
d11pos[0,0], d11neg[-1,0]
"""
Explanation: We look at the diffusivity $D$, the elastodiffusivity $d$, and the activation volume tensor $V$ over a range of temperatures from 300K to 1200K.
First, we calculate all of the pieces, including the diffusivity prefactor and activation barrier. As we only have one barrier, we compute the barrier at $k_\text{B}T = 1$.
End of explanation
"""
V = np.array(Vlist)
fig, ax1 = plt.subplots()
ax1.plot(1./Trange, V[:,0], 'k', label='$V_{\\rm{total}}$')
ax1.plot(1./Trange, V[:,1], 'b', label='$V_{11}$')
ax1.plot(1./Trange, V[:,2], 'r', label='$V_{12}$')
ax1.plot(1./Trange, 2*V[:,3], 'g', label='$V_{44}$')
ax1.set_yscale('linear')
ax1.set_ylabel('$V$ [atomic volume]', fontsize='x-large')
ax1.set_xlabel('$T^{-1}$ [K$^{-1}$]', fontsize='x-large')
ax1.legend(bbox_to_anchor=(0.3,0.3,0.5,0.2), ncol=2,
shadow=True, frameon=True, fontsize='x-large')
ax2 = ax1.twiny()
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks([1./t for t in Tlabels])
ax2.set_xticklabels(["{:.0f}K".format(t) for t in Tlabels])
ax2.set_xlabel('$T$ [K]', fontsize='x-large')
ax2.grid(False)
ax2.tick_params(axis='x', top='on', direction='in', length=6)
plt.show()
# plt.savefig('Fe-C-activation-volume.pdf', transparent=True, format='pdf')
print('Total volume: {v[0]:.4f}, {V[0]:.4f}A^3\nV_xxxx: {v[1]:.4f}, {V[1]:.4f}A^3\nV_xxyy: {v[2]:.4f}, {V[2]:.4f}A^3\nV_xyxy: {v[3]:.4f}, {V[3]:.4f}A^3'.format(v=V[-1,:], V=V[-1,:]*1e3*Fe.volume))
Vsph = 0.2*(3*V[-1,1] + 2*V[-1,2] + 4*V[-1,3]) # (3V11 + 2V12 + 2V44)/5
print('Spherical average uniaxial activation volume: {:.4f} {:.4f}A^3'.format(Vsph, Vsph*1e3*Fe.volume))
"""
Explanation: Activation volume. We plot the isotropic value (change in diffusivity with respect to pressure), but also the $V_{xxxx}$, $V_{xxyy}$, and $V_{xyxy}$ terms. Interestingly, the $V_{xxxx}$ term is negative---which indicates that diffusivity along the $[100]$ direction increases with compressive stress in the $[100]$ direction.
End of explanation
"""
|
gpagliuca/pyfas | docs/notebooks/.ipynb_checkpoints/OLGA_ppl-checkpoint.ipynb | gpl-3.0 | ppl_path = '../../pyfas/test/test_files/'
fname = 'FC1_rev01.ppl'
ppl = fa.Ppl(ppl_path+fname)
"""
Explanation: OLGA ppl files, examples and howto
For a ppl file the following methods are available:
<b>filter_data</b> - return a filtered subset of profiles
<b>extract</b> - extract a single profile variable
<b>to_excel</b> - dump all the data to an excel file
The usual workflow should be:
Load the correct ppl
Select the desired variable(s)
Extract the results or dump all the variables to an excel file
Post-process your data in Excel or in the notebook itself
Ppl loading
To load a specific ppl file the correct path and filename have to be provided:
End of explanation
"""
ppl.filter_data('PT')
"""
Explanation: Profile selection
As for tpl files, a ppl file may contain hundreds of profiles, in particular for complex networks. For this reason a filtering method is quite useful.
The easiest way is to filter all the profiles using patterns: the command ppl.filter_data("PT") filters all the pressure profiles (or better, all the profiles with "PT" in the description; if you have defined a temperature profile in the position "PTTOPSIDE", for example, this profile will be selected too).
The resulting python dictionary will have a unique index for each filtered profile that can be used to identify the interesting profile(s).
In case of an empty pattern all the available profiles will be reported.
End of explanation
"""
pd.DataFrame(ppl.filter_data('PT'), index=("Profiles",)).T
"""
Explanation: The same output can be reported as a pandas dataframe:
End of explanation
"""
pd.DataFrame(ppl.filter_data("TM"), index=("Profiles",)).T
pd.DataFrame(ppl.filter_data("PT"), index=("Profiles",)).T
"""
Explanation: Dump to excel
To dump all the variables in an excel file use ppl.to_excel()
If no path is provided an excel file with the same name as the ppl file is generated in the working folder. Depending on the ppl size this may take a while.
Extract a specific variable
Once you know the variable(s) index you are interested in (see the filtering paragraph above for more info) you can extract it (or them) and use the data directly in python.
Let's assume you are interested in the pressure and the temperature profile of the branch riser:
End of explanation
"""
ppl.extract(13)
ppl.extract(12)
"""
Explanation: Our targets are:
<i>variable 13</i> for the temperature
and
<i>variable 12</i> for the pressure
Now we can proceed with the data extraction:
End of explanation
"""
ppl.data.keys()
"""
Explanation: The ppl object now has the two profiles available in the data attribute:
End of explanation
"""
ppl.label[13]
"""
Explanation: while the label attribute stores the variable type:
End of explanation
"""
%matplotlib inline
geometry = ppl.data[12][0]
pt_riser = ppl.data[12][1]
tm_riser = ppl.data[13][1]
def ppl_plot(geo, v0, v1, ts):
fig, ax0 = plt.subplots(figsize=(12, 7));
ax0.grid(True)
p0, = ax0.plot(geo, v0[ts])
ax0.set_ylabel("[C]", fontsize=16)
ax0.set_xlabel("[m]", fontsize=16)
ax1 = ax0.twinx()
p1, = ax1.plot(geo, v1[ts]/1e5, 'r')
ax1.grid(False)
ax1.set_ylabel("[bara]", fontsize=16)
ax1.tick_params(axis="both", labelsize=16)
ax1.tick_params(axis="both", labelsize=16)
plt.legend((p0, p1), ("Temperature profile", "Pressure profile"), loc=3, fontsize=16)
plt.title("P and T for case FC1", size=20);
"""
Explanation: Ppl data structure
The ppl data structure at the moment contains:
the geometry profile of the branch as ppl.data[variable_index][0]
the selected profile at the timestep 0 as ppl.data[variable_index][1][0]
the selected profile at the last timestep as ppl.data[variable_index][1][-1]
In other words the first index is the variable, the second is 0 for the geometry and 1 for the data, the last one identifies the timestep.
Data processing
The results available in the data attribute are numpy arrays and can be easily manipulated and plotted:
End of explanation
"""
ppl_plot(geometry, tm_riser, pt_riser, -1)
"""
Explanation: To plot the last timestep:
End of explanation
"""
import ipywidgets.widgets as widgets
from ipywidgets import interact
timesteps=len(tm_riser)-1
@interact
def ppl_plot(ts=widgets.IntSlider(min=0, max=timesteps)):
fig, ax0 = plt.subplots(figsize=(12, 7));
ax0.grid(True)
p0, = ax0.plot(geometry, tm_riser[ts])
ax0.set_ylabel("[C]", fontsize=16)
ax0.set_xlabel("[m]", fontsize=16)
ax0.set_ylim(10, 12)
ax1 = ax0.twinx()
ax1.set_ylim(90, 130)
p1, = ax1.plot(geometry, pt_riser[ts]/1e5, 'r')
ax1.grid(False)
ax1.set_ylabel("[bara]", fontsize=16)
ax1.tick_params(axis="both", labelsize=16)
ax1.tick_params(axis="both", labelsize=16)
plt.legend((p0, p1), ("Temperature profile", "Pressure profile"), loc=3, fontsize=16)
plt.title("P and T for case FC1 @ timestep {}".format(ts), size=20);
"""
Explanation: The time can also be used as parameter:
End of explanation
"""
|
darkomen/TFG | medidas/12082015/.ipynb_checkpoints/Análisis de datos Ensayo 1-checkpoint.ipynb | cc0-1.0 | #Importamos las librerías utilizadas
import numpy as np
import pandas as pd
import seaborn as sns
#Mostramos las versiones usadas de cada librerías
print ("Numpy v{}".format(np.__version__))
print ("Pandas v{}".format(pd.__version__))
print ("Seaborn v{}".format(sns.__version__))
#Abrimos el fichero csv con los datos de la muestra
datos = pd.read_csv('1119700.CSV')
%pylab inline
#Almacenamos en una lista las columnas del fichero con las que vamos a trabajar
columns = ['Diametro X','Diametro Y', 'RPM TRAC']
#Mostramos un resumen de los datos obtenidoss
datos[columns].describe()
#datos.describe().loc['mean',['Diametro X [mm]', 'Diametro Y [mm]']]
"""
Explanation: Analysis of the collected data
IPython is used to analyse and display the data collected during production. An expert-system controller is implemented. The data analysed are from 12 August 2015.
Experiment details:
* Start time: 11:05
* End time: 15:08
* Extruded filament: cm
* $T: 150ºC$
* $V_{min}$ puller: 1.5 mm/s
* $V_{max}$ puller: 3.4 mm/s
* The speed increments in the expert-system rules are different:
* In case 5 the speed increment changes from +1 to +2.
End of explanation
"""
datos.ix[:, "Diametro X":"Diametro Y"].plot(figsize=(16,10),ylim=(0.5,3)).hlines([1.85,1.65],0,3500,colors='r')
datos['RPM TRAC'].plot(secondary_y='RPM TRAC')
datos.ix[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes')
"""
Explanation: We plot both diameters and the puller speed on the same chart
End of explanation
"""
plt.scatter(x=datos['Diametro X'], y=datos['Diametro Y'], marker='.')
"""
Explanation: The boxplot shows that most of the data lie above the mean (first quartile). We will try to lower that percentage. As a first approach, we will use larger speed increments when the diameter is between $1.80mm$ and $1.75 mm$ (case 5), applying increments of $d_v2$ instead of $d_v1$.
Comparison of Diametro X against Diametro Y to look at the filament ratio
End of explanation
"""
datos_filtrados = datos[(datos['Diametro X'] >= 0.9) & (datos['Diametro Y'] >= 0.9)]
#datos_filtrados.ix[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes')
"""
Explanation: Data filtering
Samples with $d_x < 0.9$ or $d_y < 0.9$ are assumed to be sensor errors, so they are filtered out of the measurements (the code above keeps only readings at or above 0.9).
End of explanation
"""
plt.scatter(x=datos_filtrados['Diametro X'], y=datos_filtrados['Diametro Y'], marker='.')
"""
Explanation: X/Y scatter plot
End of explanation
"""
ratio = datos_filtrados['Diametro X']/datos_filtrados['Diametro Y']
ratio.describe()
rolling_mean = pd.rolling_mean(ratio, 50)
rolling_std = pd.rolling_std(ratio, 50)
rolling_mean.plot(figsize=(12,6))
# plt.fill_between(ratio, y1=rolling_mean+rolling_std, y2=rolling_mean-rolling_std, alpha=0.5)
ratio.plot(figsize=(12,6), alpha=0.6, ylim=(0.5,1.5))
"""
Explanation: Analysis of the ratio data
End of explanation
"""
Th_u = 1.85
Th_d = 1.65
data_violations = datos[(datos['Diametro X'] > Th_u) | (datos['Diametro X'] < Th_d) |
(datos['Diametro Y'] > Th_u) | (datos['Diametro Y'] < Th_d)]
data_violations.describe()
data_violations.plot(subplots=True, figsize=(12,12))
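# Explicit count of the samples outside the quality band (see the explanation below):
len(data_violations)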
"""
Explanation: Quality limits
We count how many times the quality limits are exceeded.
$Th^+ = 1.85$ and $Th^- = 1.65$
End of explanation
"""
|
patrick-kidger/diffrax | examples/neural_sde.ipynb | apache-2.0 | from typing import Union
import diffrax
import equinox as eqx # https://github.com/patrick-kidger/equinox
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
import matplotlib.pyplot as plt
import optax # https://github.com/deepmind/optax
"""
Explanation: Neural SDE
This example constructs a neural SDE as a generative time series model.
An SDE is, of course, random: it defines some distribution. Each sample is a whole path. Thus in modern machine learning parlance, an SDE is a generative time series model. This means it can be trained as a GAN, for example. This does mean we need a discriminator that consumes a path as an input; we use a CDE.
Training an SDE as a GAN is precisely what this example does. Doing so will reproduce the following toy example, which is trained on irregularly-sampled time series:
References:
Training SDEs as GANs:
bibtex
@inproceedings{kidger2021sde1,
title={{N}eural {SDE}s as {I}nfinite-{D}imensional {GAN}s},
author={Kidger, Patrick and Foster, James and Li, Xuechen and Lyons, Terry J},
booktitle = {Proceedings of the 38th International Conference on Machine Learning},
pages = {5453--5463},
year = {2021},
volume = {139},
series = {Proceedings of Machine Learning Research},
publisher = {PMLR},
}
Improved training techniques:
bibtex
@incollection{kidger2021sde2,
title={{E}fficient and {A}ccurate {G}radients for {N}eural {SDE}s},
author={Kidger, Patrick and Foster, James and Li, Xuechen and Lyons, Terry},
booktitle = {Advances in Neural Information Processing Systems 34},
year = {2021},
publisher = {Curran Associates, Inc.},
}
This example is available as a Jupyter notebook here.
!!! warning
This example will need a GPU to run efficiently.
!!! danger "Advanced example"
This is a pretty advanced example.
End of explanation
"""
def lipswish(x):
return 0.909 * jnn.silu(x)
"""
Explanation: LipSwish activation functions are a good choice for the discriminator of an SDE-GAN. (Their use here was introduced in the second reference above.)
For simplicity we will actually use LipSwish activations everywhere, even in the generator.
End of explanation
"""
class VectorField(eqx.Module):
scale: Union[int, jnp.ndarray]
mlp: eqx.nn.MLP
def __init__(self, hidden_size, width_size, depth, scale, *, key, **kwargs):
super().__init__(**kwargs)
scale_key, mlp_key = jrandom.split(key)
if scale:
self.scale = jrandom.uniform(
scale_key, (hidden_size,), minval=0.9, maxval=1.1
)
else:
self.scale = 1
self.mlp = eqx.nn.MLP(
in_size=hidden_size + 1,
out_size=hidden_size,
width_size=width_size,
depth=depth,
activation=lipswish,
final_activation=jnn.tanh,
key=mlp_key,
)
def __call__(self, t, y, args):
return self.scale * self.mlp(jnp.concatenate([t[None], y]))
class ControlledVectorField(eqx.Module):
scale: Union[int, jnp.ndarray]
mlp: eqx.nn.MLP
control_size: int
hidden_size: int
def __init__(
self, control_size, hidden_size, width_size, depth, scale, *, key, **kwargs
):
super().__init__(**kwargs)
scale_key, mlp_key = jrandom.split(key)
if scale:
self.scale = jrandom.uniform(
scale_key, (hidden_size, control_size), minval=0.9, maxval=1.1
)
else:
self.scale = 1
self.mlp = eqx.nn.MLP(
in_size=hidden_size + 1,
out_size=hidden_size * control_size,
width_size=width_size,
depth=depth,
activation=lipswish,
final_activation=jnn.tanh,
key=mlp_key,
)
self.control_size = control_size
self.hidden_size = hidden_size
def __call__(self, t, y, args):
return self.scale * self.mlp(jnp.concatenate([t[None], y])).reshape(
self.hidden_size, self.control_size
)
"""
Explanation: Now set up the vector fields appearing on the right hand side of each differential equation.
End of explanation
"""
class NeuralSDE(eqx.Module):
initial: eqx.nn.MLP
vf: VectorField # drift
cvf: ControlledVectorField # diffusion
readout: eqx.nn.Linear
initial_noise_size: int
noise_size: int
def __init__(
self,
data_size,
initial_noise_size,
noise_size,
hidden_size,
width_size,
depth,
*,
key,
**kwargs,
):
super().__init__(**kwargs)
initial_key, vf_key, cvf_key, readout_key = jrandom.split(key, 4)
self.initial = eqx.nn.MLP(
initial_noise_size, hidden_size, width_size, depth, key=initial_key
)
self.vf = VectorField(hidden_size, width_size, depth, scale=True, key=vf_key)
self.cvf = ControlledVectorField(
noise_size, hidden_size, width_size, depth, scale=True, key=cvf_key
)
self.readout = eqx.nn.Linear(hidden_size, data_size, key=readout_key)
self.initial_noise_size = initial_noise_size
self.noise_size = noise_size
def __call__(self, ts, *, key):
t0 = ts[0]
t1 = ts[-1]
dt0 = 1.0
init_key, bm_key = jrandom.split(key, 2)
init = jrandom.normal(init_key, (self.initial_noise_size,))
control = diffrax.VirtualBrownianTree(
t0=t0, t1=t1, tol=dt0 / 2, shape=(self.noise_size,), key=bm_key
)
vf = diffrax.ODETerm(self.vf) # Drift term
cvf = diffrax.ControlTerm(self.cvf, control) # Diffusion term
terms = diffrax.MultiTerm(vf, cvf)
# ReversibleHeun is a cheap choice of SDE solver. We could also use Euler etc.
solver = diffrax.ReversibleHeun()
y0 = self.initial(init)
saveat = diffrax.SaveAt(ts=ts)
# We happen to know from our dataset that we're not going to take many steps.
# Specifying a smallest-possible upper bound speeds things up.
sol = diffrax.diffeqsolve(
terms, solver, t0, t1, dt0, y0, saveat=saveat, max_steps=64
)
return jax.vmap(self.readout)(sol.ys)
class NeuralCDE(eqx.Module):
initial: eqx.nn.MLP
vf: VectorField
cvf: ControlledVectorField
readout: eqx.nn.Linear
def __init__(self, data_size, hidden_size, width_size, depth, *, key, **kwargs):
super().__init__(**kwargs)
initial_key, vf_key, cvf_key, readout_key = jrandom.split(key, 4)
self.initial = eqx.nn.MLP(
data_size + 1, hidden_size, width_size, depth, key=initial_key
)
self.vf = VectorField(hidden_size, width_size, depth, scale=False, key=vf_key)
self.cvf = ControlledVectorField(
data_size, hidden_size, width_size, depth, scale=False, key=cvf_key
)
self.readout = eqx.nn.Linear(hidden_size, 1, key=readout_key)
def __call__(self, ts, ys):
# Interpolate data into a continuous path.
ys = diffrax.linear_interpolation(
ts, ys, replace_nans_at_start=0.0, fill_forward_nans_at_end=True
)
init = jnp.concatenate([ts[0, None], ys[0]])
control = diffrax.LinearInterpolation(ts, ys)
vf = diffrax.ODETerm(self.vf)
cvf = diffrax.ControlTerm(self.cvf, control)
terms = diffrax.MultiTerm(vf, cvf)
solver = diffrax.ReversibleHeun()
t0 = ts[0]
t1 = ts[-1]
dt0 = 1.0
y0 = self.initial(init)
# Have the discriminator produce an output at both `t0` *and* `t1`.
# The output at `t0` has only seen the initial point of a sample. This gives
# additional supervision to the distribution learnt for the initial condition.
# The output at `t1` has seen the entire path of a sample. This is needed to
# actually learn the evolving trajectory.
saveat = diffrax.SaveAt(t0=True, t1=True)
sol = diffrax.diffeqsolve(
terms, solver, t0, t1, dt0, y0, saveat=saveat, max_steps=64
)
return jax.vmap(self.readout)(sol.ys)
@eqx.filter_jit
def clip_weights(self):
leaves, treedef = jax.tree_flatten(
self, is_leaf=lambda x: isinstance(x, eqx.nn.Linear)
)
new_leaves = []
for leaf in leaves:
if isinstance(leaf, eqx.nn.Linear):
lim = 1 / leaf.out_features
leaf = eqx.tree_at(
lambda x: x.weight, leaf, leaf.weight.clip(-lim, lim)
)
new_leaves.append(leaf)
return jax.tree_unflatten(treedef, new_leaves)
"""
Explanation: Now set up the neural SDE (the generator) and the neural CDE (the discriminator).
Note the use of very large step sizes. By using a large step size we essentially "bake in" the discretisation. This is quite a standard way to decrease computational cost when the vector field is a pure neural network. (You can reduce the step size here if you want to -- which will increase the computational cost, of course.)
Note the clip_weights method on the CDE -- this is part of imposing the Lipschitz condition on the discriminator of a Wasserstein GAN.
(The other ingredient imposing this condition is the use of the LipSwish activation functions we saw earlier.)
End of explanation
"""
@jax.jit
@jax.vmap
def get_data(key):
bm_key, y0_key, drop_key = jrandom.split(key, 3)
mu = 0.02
theta = 0.1
sigma = 0.4
t0 = 0
t1 = 63
t_size = 64
def drift(t, y, args):
return mu * t - theta * y
def diffusion(t, y, args):
return 2 * sigma * t / t1
bm = diffrax.UnsafeBrownianPath(shape=(), key=bm_key)
drift = diffrax.ODETerm(drift)
diffusion = diffrax.ControlTerm(diffusion, bm)
terms = diffrax.MultiTerm(drift, diffusion)
solver = diffrax.Euler()
dt0 = 0.1
y0 = jrandom.uniform(y0_key, (1,), minval=-1, maxval=1)
ts = jnp.linspace(t0, t1, t_size)
saveat = diffrax.SaveAt(ts=ts)
sol = diffrax.diffeqsolve(
terms, solver, t0, t1, dt0, y0, saveat=saveat, adjoint=diffrax.NoAdjoint()
)
# Make the data irregularly sampled
to_drop = jrandom.bernoulli(drop_key, 0.3, (t_size, 1))
ys = jnp.where(to_drop, jnp.nan, sol.ys)
return ts, ys
def dataloader(arrays, batch_size, loop, *, key):
dataset_size = arrays[0].shape[0]
assert all(array.shape[0] == dataset_size for array in arrays)
indices = jnp.arange(dataset_size)
while True:
perm = jrandom.permutation(key, indices)
key = jrandom.split(key, 1)[0]
start = 0
end = batch_size
while end < dataset_size:
batch_perm = perm[start:end]
yield tuple(array[batch_perm] for array in arrays)
start = end
end = start + batch_size
if not loop:
break
"""
Explanation: Next, the dataset. This follows the trajectories you can see in the picture above. (Namely positive drift with mean-reversion and time-dependent diffusion.)
End of explanation
"""
@eqx.filter_jit
def loss(generator, discriminator, ts_i, ys_i, key, step=0):
batch_size, _ = ts_i.shape
key = jrandom.fold_in(key, step)
key = jrandom.split(key, batch_size)
fake_ys_i = jax.vmap(generator)(ts_i, key=key)
real_score = jax.vmap(discriminator)(ts_i, ys_i)
fake_score = jax.vmap(discriminator)(ts_i, fake_ys_i)
return jnp.mean(real_score - fake_score)
@eqx.filter_grad
def grad_loss(g_d, ts_i, ys_i, key, step):
generator, discriminator = g_d
return loss(generator, discriminator, ts_i, ys_i, key, step)
def increase_update_initial(updates):
get_initial_leaves = lambda u: jax.tree_leaves(u.initial)
return eqx.tree_at(get_initial_leaves, updates, replace_fn=lambda x: x * 10)
@eqx.filter_jit
def make_step(
generator,
discriminator,
g_opt_state,
d_opt_state,
g_optim,
d_optim,
ts_i,
ys_i,
key,
step,
):
g_grad, d_grad = grad_loss((generator, discriminator), ts_i, ys_i, key, step)
g_updates, g_opt_state = g_optim.update(g_grad, g_opt_state)
d_updates, d_opt_state = d_optim.update(d_grad, d_opt_state)
g_updates = increase_update_initial(g_updates)
d_updates = increase_update_initial(d_updates)
generator = eqx.apply_updates(generator, g_updates)
discriminator = eqx.apply_updates(discriminator, d_updates)
discriminator = discriminator.clip_weights()
return generator, discriminator, g_opt_state, d_opt_state
"""
Explanation: Now the usual GAN training step.
There is one neural-SDE-specific trick here: we increase the update size (i.e. the learning rate) for those parameters describing (and discriminating) the initial condition of the SDE. Otherwise the model tends to focus on fitting just the rest of the data (i.e. the random evolution over time).
End of explanation
"""
def main(
initial_noise_size=5,
noise_size=3,
hidden_size=16,
width_size=16,
depth=1,
generator_lr=2e-5,
discriminator_lr=1e-4,
batch_size=1024,
steps=10000,
steps_per_print=200,
dataset_size=8192,
seed=5678,
):
key = jrandom.PRNGKey(seed)
(
data_key,
generator_key,
discriminator_key,
dataloader_key,
train_key,
evaluate_key,
sample_key,
) = jrandom.split(key, 7)
data_key = jrandom.split(data_key, dataset_size)
ts, ys = get_data(data_key)
_, _, data_size = ys.shape
generator = NeuralSDE(
data_size,
initial_noise_size,
noise_size,
hidden_size,
width_size,
depth,
key=generator_key,
)
discriminator = NeuralCDE(
data_size, hidden_size, width_size, depth, key=discriminator_key
)
g_optim = optax.rmsprop(generator_lr)
d_optim = optax.rmsprop(-discriminator_lr)
g_opt_state = g_optim.init(eqx.filter(generator, eqx.is_inexact_array))
d_opt_state = d_optim.init(eqx.filter(discriminator, eqx.is_inexact_array))
infinite_dataloader = dataloader(
(ts, ys), batch_size, loop=True, key=dataloader_key
)
for step, (ts_i, ys_i) in zip(range(steps), infinite_dataloader):
step = jnp.asarray(step)
generator, discriminator, g_opt_state, d_opt_state = make_step(
generator,
discriminator,
g_opt_state,
d_opt_state,
g_optim,
d_optim,
ts_i,
ys_i,
key,
step,
)
if (step % steps_per_print) == 0 or step == steps - 1:
total_score = 0
num_batches = 0
for ts_i, ys_i in dataloader(
(ts, ys), batch_size, loop=False, key=evaluate_key
):
score = loss(generator, discriminator, ts_i, ys_i, sample_key)
total_score += score.item()
num_batches += 1
print(f"Step: {step}, Loss: {total_score / num_batches}")
# Plot samples
fig, ax = plt.subplots()
num_samples = min(50, dataset_size)
ts_to_plot = ts[:num_samples]
ys_to_plot = ys[:num_samples]
def _interp(ti, yi):
return diffrax.linear_interpolation(
ti, yi, replace_nans_at_start=0.0, fill_forward_nans_at_end=True
)
ys_to_plot = jax.vmap(_interp)(ts_to_plot, ys_to_plot)[..., 0]
ys_sampled = jax.vmap(generator)(
ts_to_plot, key=jrandom.split(sample_key, num_samples)
)[..., 0]
kwargs = dict(label="Real")
for ti, yi in zip(ts_to_plot, ys_to_plot):
ax.plot(ti, yi, c="dodgerblue", linewidth=0.5, alpha=0.7, **kwargs)
kwargs = {}
kwargs = dict(label="Generated")
for ti, yi in zip(ts_to_plot, ys_sampled):
ax.plot(ti, yi, c="crimson", linewidth=0.5, alpha=0.7, **kwargs)
kwargs = {}
ax.set_title(f"{num_samples} samples from both real and generated distributions.")
fig.legend()
fig.tight_layout()
fig.savefig("neural_sde.png")
plt.show()
main()
"""
Explanation: This is our main entry point. Try running main().
End of explanation
"""
|
tensorflow/examples | courses/udacity_intro_to_tensorflow_lite/tflite_c05_exercise_rock_paper_scissors.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2018 The TensorFlow Authors.
End of explanation
"""
import os
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
print("GPU is", "available" if tf.config.list_physical_devices('GPU') else "NOT AVAILABLE")
"""
Explanation: Rock, Paper & Scissors with TensorFlow Hub - TFLite
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_lite/tflite_c05_exercise_rock_paper_scissors.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_lite/tflite_c05_exercise_rock_paper_scissors.ipynb">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub</a>
</td>
</table>
Setup
End of explanation
"""
module_selection = ("mobilenet_v2", 224, 1280) #@param ["(\"mobilenet_v2\", 224, 1280)", "(\"inception_v3\", 299, 2048)"] {type:"raw", allow-input: true}
handle_base, pixels, FV_SIZE = module_selection
MODULE_HANDLE ="https://tfhub.dev/google/tf2-preview/{}/feature_vector/4".format(handle_base)
IMAGE_SIZE = (pixels, pixels)
print("Using {} with input size {} and output dimension {}".format(
MODULE_HANDLE, IMAGE_SIZE, FV_SIZE))
"""
Explanation: Select the Hub/TF2 module to use
Hub modules for TF 1.x won't work here; please use one of the selections provided.
End of explanation
"""
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
"""
Explanation: Data preprocessing
Use TensorFlow Datasets to load the rock, paper and scissors dataset.
This tfds package is the easiest way to load pre-defined data. If you have your own data and are interested in importing and using it with TensorFlow, see loading image data
End of explanation
"""
splits = tfds.Split.ALL.subsplit(weighted=(80, 10, 10))
# Go to the TensorFlow Dataset's website and search for the Rock, Paper, Scissors dataset and load it here
splits, info = tfds.load( # YOUR CODE HERE )
# Save the dataset splits in a tuple
(train_examples, validation_examples, test_examples) = splits
num_examples = info.splits['train'].num_examples
num_classes = info.features['label'].num_classes
"""
Explanation: The tfds.load method downloads and caches the data, and returns a tf.data.Dataset object. These objects provide powerful, efficient methods for manipulating data and piping it into your model.
Since "rock_paper_scissors" doesn't define standard splits, use the subsplit feature to divide it into (train, validation, test) with 80%, 10%, 10% of the data respectively.
End of explanation
"""
def format_image(image, label):
image = tf.image.resize(image, IMAGE_SIZE) / 255.0
return image, label
"""
Explanation: Format the Data
Use the tf.image module to format the images for the task.
Resize the images to a fixes input size, and rescale the input channels
End of explanation
"""
BATCH_SIZE = 32 #@param {type:"integer"}
# Prepare the examples by preprocessing them and then batching them (and optionally prefetching them)
# If you wish you can shuffle train set here
train_batches = # YOUR CODE HERE
validation_batches = # YOUR CODE HERE
test_batches = # YOUR CODE HERE
"""
Explanation: Now shuffle and batch the data
End of explanation
"""
for image_batch, label_batch in train_batches.take(1):
pass
image_batch.shape
"""
Explanation: Inspect a batch
End of explanation
"""
do_fine_tuning = False #@param {type:"boolean"}
# Build the model with a TFHub KerasLayer and attach a classification head to it
print("Building model with", MODULE_HANDLE)
model = tf.keras.Sequential([
hub.KerasLayer(MODULE_HANDLE,
input_shape=IMAGE_SIZE + (3, ),
output_shape=[FV_SIZE],
trainable=do_fine_tuning),
tf.keras.layers.Dense(num_classes)
])
model.summary()
"""
Explanation: Defining the model
All it takes is to put a linear classifier on top of the feature_extractor_layer with the Hub module.
For speed, we start out with a non-trainable feature_extractor_layer, but you can also enable fine-tuning for greater accuracy.
End of explanation
"""
if do_fine_tuning:
model.compile(
optimizer=tf.keras.optimizers.SGD(lr=0.002, momentum=0.9),
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
else:
model.compile(
optimizer='adam',
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
EPOCHS = 3
hist = model.fit(train_batches,
epochs=EPOCHS,
validation_data=validation_batches)
"""
Explanation: Training the model
End of explanation
"""
RPS_SAVED_MODEL = "exp_saved_model"
"""
Explanation: Export the model
End of explanation
"""
# Use TensorFlow's SavedModel API to export the SavedModel from the trained Keras model
# YOUR CODE HERE
"""
Explanation: Export the SavedModel
End of explanation
"""
%%bash -s $RPS_SAVED_MODEL
saved_model_cli show --dir $1 --tag_set serve --signature_def serving_default
loaded = tf.saved_model.load(RPS_SAVED_MODEL)
print(list(loaded.signatures.keys()))
infer = loaded.signatures["serving_default"]
print(infer.structured_input_signature)
print(infer.structured_outputs)
"""
Explanation: Here you can verify the default signature of your exported SavedModel
End of explanation
"""
# Initialize the TFLite converter to load the SavedModel
converter = # YOUR CODE HERE
# Set the optimization strategy for 'size' in the converter
converter.optimizations = [# YOUR CODE HERE]
# Use the tool to finally convert the model
tflite_model = # YOUR CODE HERE
with open("converted_model.tflite", "wb") as f:
f.write(tflite_model)
"""
Explanation: Convert with TFLiteConverter
End of explanation
"""
#@title Loading the converted TFLite model...
# Load TFLite model and allocate tensors.
tflite_model_file = 'converted_model.tflite'
with open(tflite_model_file, 'rb') as fid:
tflite_model = fid.read()
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
#@title Testing on random test examples...
from tqdm import tqdm
# Gather results for the randomly sampled test images
predictions = []
test_labels, test_imgs = [], []
for img, label in tqdm(test_batches.take(10)):
interpreter.set_tensor(input_index, img)
interpreter.invoke()
predictions.append(interpreter.get_tensor(output_index))
test_labels.append(label.numpy()[0])
test_imgs.append(img)
#@title Utility functions for plotting
# Utilities for plotting
class_names = ['rock', 'paper', 'scissors']
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
img = np.squeeze(img)
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'green'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
#@title Visualize the outputs { run: "auto" }
index = 9 #@param {type:"slider", min:0, max:9, step:1}
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(index, predictions, test_labels, test_imgs)
plt.show()
"""
Explanation: Run the following cells to check whether your TFLite model is working using the Python Interpreter
End of explanation
"""
with open('labels.txt', 'w') as f:
f.write('\n'.join(class_names))
try:
from google.colab import files
files.download('converted_model.tflite')
files.download('labels.txt')
except:
pass
"""
Explanation: Download the model
NOTE: You might have to run the cell below twice
End of explanation
"""
!mkdir -p test_images
from PIL import Image
for index, (image, label) in enumerate(test_batches.take(50)):
image = tf.cast(image * 255.0, tf.uint8)
image = tf.squeeze(image).numpy()
pil_image = Image.fromarray(image)
pil_image.save('test_images/{}_{}.jpg'.format(class_names[label[0]], index))
!ls test_images
!zip -qq rps_test_images.zip -r test_images/
try:
files.download('rps_test_images.zip')
except:
pass
"""
Explanation: Prepare the test images for download (Optional)
This part involves downloading additional test images for the mobile apps, in case you need to try out more samples
End of explanation
"""
|
ekostat/ekostat_calculator | notebooks/lv_notebook_kustzon.ipynb | mit | root_directory = 'D:/github/w_vattenstatus/ekostat_calculator'#"../" #os.getcwd()
workspace_directory = root_directory + '/workspaces'
resource_directory = root_directory + '/resources'
#alias = 'lena'
user_id = 'test_user' # maybe this should be the off_line user?
# workspace_alias = 'lena_indicator' # kustzonsmodellen_3daydata
workspace_alias = 'kustzonsmodellen_3daydata'
# ## Initiate EventHandler
print(root_directory)
paths = {'user_id': user_id,
'workspace_directory': root_directory + '/workspaces',
'resource_directory': root_directory + '/resources',
'log_directory': 'D:/github' + '/log',
'test_data_directory': 'D:/github' + '/test_data',
'cache_directory': 'D:/github/w_vattenstatus/cache'}
t0 = time.time()
ekos = EventHandler(**paths)
#request = ekos.test_requests['request_workspace_list']
#response = ekos.request_workspace_list(request)
#ekos.write_test_response('request_workspace_list', response)
print('-'*50)
print('Time for request: {}'.format(time.time()-t0))
###############################################################################################################################
# ### Make a new workspace
# ekos.copy_workspace(source_uuid='default_workspace', target_alias='kustzonsmodellen_3daydata')
# ### See existing workspaces and choose workspace name to load
ekos.print_workspaces()
workspace_uuid = ekos.get_unique_id_for_alias(workspace_alias = workspace_alias) #'kuszonsmodellen' lena_indicator
print(workspace_uuid)
workspace_alias = ekos.get_alias_for_unique_id(workspace_uuid = workspace_uuid)
###############################################################################################################################
# ### Load existing workspace
ekos.load_workspace(unique_id = workspace_uuid)
###############################################################################################################################
# ### import data
# ekos.import_default_data(workspace_alias = workspace_alias)
###############################################################################################################################
# ### Load all data in workspace
# #### if there is old data that you want to remove
ekos.get_workspace(workspace_uuid = workspace_uuid).delete_alldata_export()
ekos.get_workspace(workspace_uuid = workspace_uuid).delete_all_export_data()
###############################################################################################################################
# #### to just load existing data in workspace
ekos.load_data(workspace_uuid = workspace_uuid)
###############################################################################################################################
# ### check workspace data length
w = ekos.get_workspace(workspace_uuid = workspace_uuid)
len(w.data_handler.get_all_column_data_df())
###############################################################################################################################
# ### see subsets in data
for subset_uuid in w.get_subset_list():
print('uuid {} alias {}'.format(subset_uuid, w.uuid_mapping.get_alias(unique_id=subset_uuid)))
###############################################################################################################################
# # Step 0
print(w.data_handler.all_data.columns)
###############################################################################################################################
# ### Apply first data filter
w.apply_data_filter(step = 0) # This sets the first level of data filter in the IndexHandler
###############################################################################################################################
# # Step 1
# ### make new subset
# w.copy_subset(source_uuid='default_subset', target_alias='test_kustzon')
###############################################################################################################################
# ### Choose subset name to load
subset_alias = 'test_kustzon'
# subset_alias = 'period_2007-2012_refvalues_2013'
# subset_alias = 'test_subset'
subset_uuid = ekos.get_unique_id_for_alias(workspace_alias = workspace_alias, subset_alias = subset_alias)
print('subset_alias', subset_alias, 'subset_uuid', subset_uuid)
"""
Explanation: Load directories
End of explanation
"""
# #### year filter
w.set_data_filter(subset = subset_uuid, step=1,
filter_type='include_list',
filter_name='MYEAR',
data=[2007,2008,2009,2010,2011,2012])#['2011', '2012', '2013']) #, 2014, 2015, 2016
###############################################################################################################################
# #### waterbody filter
w.set_data_filter(subset = subset_uuid, step=1,
filter_type='include_list',
filter_name='viss_eu_cd', data = []) #'SE584340-174401', 'SE581700-113000', 'SE654470-222700', 'SE633000-195000', 'SE625180-181655'
# data=['SE584340-174401', 'SE581700-113000', 'SE654470-222700', 'SE633000-195000', 'SE625180-181655'])
# wb with no data for din 'SE591400-182320'
f1 = w.get_data_filter_object(subset = subset_uuid, step=1)
print(f1.include_list_filter)
print('subset_alias:', subset_alias, '\nsubset uuid:', subset_uuid)
f1 = w.get_data_filter_object(subset = subset_uuid, step=1)
print(f1.include_list_filter)
###############################################################################################################################
# ## Apply step 1 datafilter to subset
w.apply_data_filter(subset = subset_uuid, step = 1)
filtered_data = w.get_filtered_data(step = 1, subset = subset_uuid)
print(filtered_data['VISS_EU_CD'].unique())
filtered_data[['AMON','NTRA','DIN','CPHL_INTEG_CALC','DEPH']].head()
"""
Explanation: Set subset filters
End of explanation
"""
### Load indicator settings filter
w.get_step_object(step = 2, subset = subset_uuid).load_indicator_settings_filters()
###############################################################################################################################
### set available indicators
w.get_available_indicators(subset= subset_uuid, step=2)
###############################################################################################################################
# ### choose indicators
#list(zip(typeA_list, df_step1.WATER_TYPE_AREA.unique()))
# indicator_list = ['oxygen','din_winter','ntot_summer', 'ntot_winter', 'dip_winter', 'ptot_summer', 'ptot_winter','bqi', 'biov', 'chl', 'secchi']
# indicator_list = ['din_winter','ntot_summer', 'ntot_winter', 'dip_winter', 'ptot_summer', 'ptot_winter']
#indicator_list = ['biov', 'chl']
# indicator_list = ['bqi', 'biov', 'chl', 'secchi']
#indicator_list = ['bqi', 'secchi'] + ['biov', 'chl'] + ['din_winter']
# indicator_list = ['din_winter','ntot_summer']
# indicator_list = ['indicator_' + indicator for indicator in indicator_list]
indicator_list = w.available_indicators
###############################################################################################################################
# ### Apply indicator data filter
print('apply indicator data filter to {}'.format(indicator_list))
for indicator in indicator_list:
w.apply_indicator_data_filter(step = 2,
subset = subset_uuid,
indicator = indicator)#,
# water_body_list = test_wb)
#print(w.mapping_objects['water_body'][wb])
#print('*************************************')
#df = w.get_filtered_data(subset = subset_uuid, step = 'step_2', water_body = 'SE625180-181655', indicator = 'indicator_din_winter').dropna(subset = ['DIN'])
"""
Explanation: #########################################################################################################################
Step 2
End of explanation
"""
# ### Set up indicator objects
print('indicator set up to {}'.format(indicator_list))
w.get_step_object(step = 3, subset = subset_uuid).indicator_setup(indicator_list = indicator_list)
###############################################################################################################################
# ### CALCULATE STATUS
print('CALCULATE STATUS to {}'.format(indicator_list))
w.get_step_object(step = 3, subset = subset_uuid).calculate_status(indicator_list = indicator_list)
###############################################################################################################################
# ### CALCULATE QUALITY ELEMENTS
w.get_step_object(step = 3, subset = subset_uuid).calculate_quality_element(quality_element = 'nutrients')
# w.get_step_object(step = 3, subset = subset_uuid).calculate_quality_element(quality_element = 'phytoplankton')
# w.get_step_object(step = 3, subset = subset_uuid).calculate_quality_element(quality_element = 'bottomfauna')
# w.get_step_object(step = 3, subset = subset_uuid).calculate_quality_element(quality_element = 'oxygen')
# w.get_step_object(step = 3, subset = subset_uuid).calculate_quality_element(quality_element = 'secchi')
# w.get_step_object(step = 3, subset = subset_uuid).calculate_quality_element(subset_unique_id = subset_uuid, quality_element = 'Phytoplankton')
"""
Explanation: #########################################################################################################################
Step 3
End of explanation
"""
|
ZhangXinNan/tensorflow | tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb | apache-2.0 | from __future__ import absolute_import, division, print_function
# Import TensorFlow >= 1.10 and enable eager execution
import tensorflow as tf
tf.enable_eager_execution()
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import unicodedata
import re
import numpy as np
import os
import time
print(tf.__version__)
"""
Explanation: Copyright 2018 The TensorFlow Authors.
Licensed under the Apache License, Version 2.0 (the "License").
Neural Machine Translation with Attention
<table class="tfo-notebook-buttons" align="left"><td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td><td>
<a target="_blank" href="https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a></td></table>
This notebook trains a sequence to sequence (seq2seq) model for Spanish to English translation using tf.keras and eager execution. This is an advanced example that assumes some knowledge of sequence to sequence models.
After training the model in this notebook, you will be able to input a Spanish sentence, such as "¿todavia estan en casa?", and return the English translation: "are you still at home?"
The translation quality is reasonable for a toy example, but the generated attention plot is perhaps more interesting. It shows which parts of the input sentence have the model's attention while translating:
<img src="https://tensorflow.org/images/spanish-english.png" alt="spanish-english attention plot">
Note: This example takes approximately 10 minutes to run on a single P100 GPU.
End of explanation
"""
# Download the file
path_to_zip = tf.keras.utils.get_file(
'spa-eng.zip', origin='http://download.tensorflow.org/data/spa-eng.zip',
extract=True)
path_to_file = os.path.dirname(path_to_zip)+"/spa-eng/spa.txt"
# Converts the unicode file to ascii
def unicode_to_ascii(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def preprocess_sentence(w):
w = unicode_to_ascii(w.lower().strip())
# creating a space between a word and the punctuation following it
# eg: "he is a boy." => "he is a boy ."
# Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation
w = re.sub(r"([?.!,¿])", r" \1 ", w)
w = re.sub(r'[" "]+', " ", w)
# replacing everything with space except (a-z, A-Z, ".", "?", "!", ",")
w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w)
w = w.rstrip().strip()
# adding a start and an end token to the sentence
    # so that the model knows when to start and stop predicting.
w = '<start> ' + w + ' <end>'
return w
# 1. Remove the accents
# 2. Clean the sentences
# 3. Return word pairs in the format: [ENGLISH, SPANISH]
def create_dataset(path, num_examples):
lines = open(path, encoding='UTF-8').read().strip().split('\n')
word_pairs = [[preprocess_sentence(w) for w in l.split('\t')] for l in lines[:num_examples]]
return word_pairs
# This class creates a word -> index mapping (e.g., "dad" -> 5) and vice-versa
# (e.g., 5 -> "dad") for each language.
class LanguageIndex():
def __init__(self, lang):
self.lang = lang
self.word2idx = {}
self.idx2word = {}
self.vocab = set()
self.create_index()
def create_index(self):
for phrase in self.lang:
self.vocab.update(phrase.split(' '))
self.vocab = sorted(self.vocab)
self.word2idx['<pad>'] = 0
for index, word in enumerate(self.vocab):
self.word2idx[word] = index + 1
for word, index in self.word2idx.items():
self.idx2word[index] = word
def max_length(tensor):
return max(len(t) for t in tensor)
def load_dataset(path, num_examples):
# creating cleaned input, output pairs
pairs = create_dataset(path, num_examples)
# index language using the class defined above
inp_lang = LanguageIndex(sp for en, sp in pairs)
targ_lang = LanguageIndex(en for en, sp in pairs)
# Vectorize the input and target languages
# Spanish sentences
input_tensor = [[inp_lang.word2idx[s] for s in sp.split(' ')] for en, sp in pairs]
# English sentences
target_tensor = [[targ_lang.word2idx[s] for s in en.split(' ')] for en, sp in pairs]
# Calculate max_length of input and output tensor
# Here, we'll set those to the longest sentence in the dataset
max_length_inp, max_length_tar = max_length(input_tensor), max_length(target_tensor)
# Padding the input and output tensor to the maximum length
input_tensor = tf.keras.preprocessing.sequence.pad_sequences(input_tensor,
maxlen=max_length_inp,
padding='post')
target_tensor = tf.keras.preprocessing.sequence.pad_sequences(target_tensor,
maxlen=max_length_tar,
padding='post')
return input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_tar
"""
Explanation: Download and prepare the dataset
We'll use a language dataset provided by http://www.manythings.org/anki/. This dataset contains language translation pairs in the format:
May I borrow this book? ¿Puedo tomar prestado este libro?
There are a variety of languages available, but we'll use the English-Spanish dataset. For convenience, we've hosted a copy of this dataset on Google Cloud, but you can also download your own copy. After downloading the dataset, here are the steps we'll take to prepare the data:
Add a start and end token to each sentence.
Clean the sentences by removing special characters.
Create a word index and reverse word index (dictionaries mapping from word → id and id → word).
Pad each sentence to a maximum length.
End of explanation
"""
# Try experimenting with the size of that dataset
num_examples = 30000
input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_targ = load_dataset(path_to_file, num_examples)
# Creating training and validation sets using an 80-20 split
input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2)
# Show length
len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val)
"""
Explanation: Limit the size of the dataset to experiment faster (optional)
Training on the complete dataset of >100,000 sentences will take a long time. To train faster, we can limit the size of the dataset to 30,000 sentences (of course, translation quality degrades with less data):
End of explanation
"""
BUFFER_SIZE = len(input_tensor_train)
BATCH_SIZE = 64
N_BATCH = BUFFER_SIZE//BATCH_SIZE
embedding_dim = 256
units = 1024
vocab_inp_size = len(inp_lang.word2idx)
vocab_tar_size = len(targ_lang.word2idx)
dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
"""
Explanation: Create a tf.data dataset
End of explanation
"""
def gru(units):
    # If you have a GPU, we recommend using CuDNNGRU (it provides a 3x speedup over GRU);
    # the code below automatically does that.
if tf.test.is_gpu_available():
return tf.keras.layers.CuDNNGRU(units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
else:
return tf.keras.layers.GRU(units,
return_sequences=True,
return_state=True,
recurrent_activation='sigmoid',
recurrent_initializer='glorot_uniform')
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
super(Encoder, self).__init__()
self.batch_sz = batch_sz
self.enc_units = enc_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = gru(self.enc_units)
def call(self, x, hidden):
x = self.embedding(x)
output, state = self.gru(x, initial_state = hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.enc_units))
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = gru(self.dec_units)
self.fc = tf.keras.layers.Dense(vocab_size)
# used for attention
self.W1 = tf.keras.layers.Dense(self.dec_units)
self.W2 = tf.keras.layers.Dense(self.dec_units)
self.V = tf.keras.layers.Dense(1)
def call(self, x, hidden, enc_output):
# enc_output shape == (batch_size, max_length, hidden_size)
# hidden shape == (batch_size, hidden size)
# hidden_with_time_axis shape == (batch_size, 1, hidden size)
# we are doing this to perform addition to calculate the score
hidden_with_time_axis = tf.expand_dims(hidden, 1)
# score shape == (batch_size, max_length, hidden_size)
score = tf.nn.tanh(self.W1(enc_output) + self.W2(hidden_with_time_axis))
# attention_weights shape == (batch_size, max_length, 1)
# we get 1 at the last axis because we are applying score to self.V
attention_weights = tf.nn.softmax(self.V(score), axis=1)
# context_vector shape after sum == (batch_size, hidden_size)
context_vector = attention_weights * enc_output
context_vector = tf.reduce_sum(context_vector, axis=1)
# x shape after passing through embedding == (batch_size, 1, embedding_dim)
x = self.embedding(x)
# x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
# passing the concatenated vector to the GRU
output, state = self.gru(x)
# output shape == (batch_size * max_length, hidden_size)
output = tf.reshape(output, (-1, output.shape[2]))
# output shape == (batch_size * max_length, vocab)
x = self.fc(output)
return x, state, attention_weights
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.dec_units))
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
"""
Explanation: Write the encoder and decoder model
Here, we'll implement an encoder-decoder model with attention which you can read about in the TensorFlow Neural Machine Translation (seq2seq) tutorial. This example uses a more recent set of APIs. This notebook implements the attention equations from the seq2seq tutorial. The following diagram shows that each input words is assigned a weight by the attention mechanism which is then used by the decoder to predict the next word in the sentence.
<img src="https://www.tensorflow.org/images/seq2seq/attention_mechanism.jpg" width="500" alt="attention mechanism">
The input is put through an encoder model which gives us the encoder output of shape (batch_size, max_length, hidden_size) and the encoder hidden state of shape (batch_size, hidden_size).
Here are the equations that are implemented:
<img src="https://www.tensorflow.org/images/seq2seq/attention_equation_0.jpg" alt="attention equation 0" width="800">
<img src="https://www.tensorflow.org/images/seq2seq/attention_equation_1.jpg" alt="attention equation 1" width="800">
We're using Bahdanau attention. Let's decide on notation before writing the simplified form:
FC = Fully connected (dense) layer
EO = Encoder output
H = hidden state
X = input to the decoder
And the pseudo-code:
score = FC(tanh(FC(EO) + FC(H)))
attention weights = softmax(score, axis = 1). Softmax by default is applied on the last axis but here we want to apply it on the 1st axis, since the shape of score is (batch_size, max_length, hidden_size). Max_length is the length of our input. Since we are trying to assign a weight to each input, softmax should be applied on that axis.
context vector = sum(attention weights * EO, axis = 1). Same reason as above for choosing axis as 1.
embedding output = The input to the decoder X is passed through an embedding layer.
merged vector = concat(embedding output, context vector)
This merged vector is then given to the GRU
The shapes of all the vectors at each step have been specified in the comments in the code:
End of explanation
"""
optimizer = tf.train.AdamOptimizer()
def loss_function(real, pred):
mask = 1 - np.equal(real, 0)
loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred) * mask
return tf.reduce_mean(loss_)
"""
Explanation: Define the optimizer and the loss function
End of explanation
"""
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
encoder=encoder,
decoder=decoder)
"""
Explanation: Checkpoints (Object-based saving)
End of explanation
"""
EPOCHS = 10
for epoch in range(EPOCHS):
start = time.time()
hidden = encoder.initialize_hidden_state()
total_loss = 0
for (batch, (inp, targ)) in enumerate(dataset):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = encoder(inp, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word2idx['<start>']] * BATCH_SIZE, 1)
# Teacher forcing - feeding the target as the next input
for t in range(1, targ.shape[1]):
# passing enc_output to the decoder
predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
loss += loss_function(targ[:, t], predictions)
# using teacher forcing
dec_input = tf.expand_dims(targ[:, t], 1)
batch_loss = (loss / int(targ.shape[1]))
total_loss += batch_loss
variables = encoder.variables + decoder.variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
if batch % 100 == 0:
print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
batch,
batch_loss.numpy()))
# saving (checkpoint) the model every 2 epochs
if (epoch + 1) % 2 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print('Epoch {} Loss {:.4f}'.format(epoch + 1,
total_loss / N_BATCH))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
"""
Explanation: Training
Pass the input through the encoder, which returns the encoder output and the encoder hidden state.
The encoder output, the encoder hidden state and the decoder input (which is the start token) are passed to the decoder.
The decoder returns the predictions and the decoder hidden state.
The decoder hidden state is then passed back into the model and the predictions are used to calculate the loss.
Use teacher forcing to decide the next input to the decoder.
Teacher forcing is the technique where the target word is passed as the next input to the decoder.
The final step is to calculate the gradients, apply them to the optimizer, and backpropagate.
End of explanation
"""
def evaluate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ):
attention_plot = np.zeros((max_length_targ, max_length_inp))
sentence = preprocess_sentence(sentence)
inputs = [inp_lang.word2idx[i] for i in sentence.split(' ')]
inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs], maxlen=max_length_inp, padding='post')
inputs = tf.convert_to_tensor(inputs)
result = ''
hidden = [tf.zeros((1, units))]
enc_out, enc_hidden = encoder(inputs, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word2idx['<start>']], 0)
for t in range(max_length_targ):
predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out)
        # storing the attention weights to plot later on
attention_weights = tf.reshape(attention_weights, (-1, ))
attention_plot[t] = attention_weights.numpy()
predicted_id = tf.multinomial(tf.exp(predictions), num_samples=1)[0][0].numpy()
result += targ_lang.idx2word[predicted_id] + ' '
if targ_lang.idx2word[predicted_id] == '<end>':
return result, sentence, attention_plot
# the predicted ID is fed back into the model
dec_input = tf.expand_dims([predicted_id], 0)
return result, sentence, attention_plot
# function for plotting the attention weights
def plot_attention(attention, sentence, predicted_sentence):
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1, 1, 1)
ax.matshow(attention, cmap='viridis')
fontdict = {'fontsize': 14}
ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90)
ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict)
plt.show()
def translate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ):
result, sentence, attention_plot = evaluate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
print('Input: {}'.format(sentence))
print('Predicted translation: {}'.format(result))
attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))]
plot_attention(attention_plot, sentence.split(' '), result.split(' '))
"""
Explanation: Translate
The evaluate function is similar to the training loop, except we don't use teacher forcing here. The input to the decoder at each time step is its previous predictions along with the hidden state and the encoder output.
Stop predicting when the model predicts the end token.
And store the attention weights for every time step.
Note: The encoder output is calculated only once for one input.
End of explanation
"""
# restoring the latest checkpoint in checkpoint_dir
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
translate('hace mucho frio aqui.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
translate('esta es mi vida.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
translate('¿todavia estan en casa?', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
# wrong translation
translate('trata de averiguarlo.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
"""
Explanation: Restore the latest checkpoint and test
End of explanation
"""
|
mathcoding/Programmazione2 | .ipynb_checkpoints/Lab 3 - Cenni di Programmazione Funzionale - Parte seconda-checkpoint.ipynb | mit | # Two different ways to define the same function:
# First: the standard method, a function named F1
def F1(x):
return x**2
# Second: a lambda expression bound to a name
F2 = lambda x: x**2
F1(3.3) == F2(3.3)
(lambda x,y: x+y)(2,3)
print(type(lambda x: x**2))
"""
Explanation: Elements supporting Functional Programming
Below we look at the most widely used tools for programming in an elegant, precise and concise style.
Lambda Expressions
The first element is the lambda expression, also called an anonymous function. Its general form is:
<p><center>`lambda <sequence of variable names>: <expression>`</center></p>
where lambda is a reserved Python keyword. Lambda expressions are used above all when functions have to be passed as parameters to other functions.
EXAMPLE:
End of explanation
"""
map(lambda x: x**2, [1,2,3,4,5,6,7,8,9,10])
list(map(lambda x: x**2, [1,2,3,4,5,6,7,8,9,10]))
"""
Explanation: The map function
The map function is the first of a triad of very important functions. The other two are filter and reduce.
The map function takes as its first argument a function of $n$ arguments, followed by $n$ iterable collections of the same length (for example, $n$ lists); it returns a specific kind of generator, a map object:
<p><center>`map(func, *iterables) --> map object`</center></p>
To obtain the result you have to iterate over the elements of the map object, for example by building a list with the list() function. Usually the function given as the first argument takes a single input argument, so map is used with a single input list. The map function is one of Python's builtins, but it is relatively easy to write a version of it using the elements of Python itself.
EXAMPLE 1: To compute the square of each number in a list:
End of explanation
"""
list(map(lambda x, y: x*y, [1,2,3,4,5,6,7,8,9], [9,8,7,6,5,4,3,2,1]))
"""
Explanation: Note that the map function passed a map object to the list function, which used it to build the list shown in the output.
EXAMPLE 2: To compute the element-wise product of two vectors:
End of explanation
"""
# SOLVE THE EXERCISE
"""
Explanation: EXERCISE 1: Write an expression that computes the sum of the squared element-wise differences of two given "vectors", that is:
$$\sum_{i = 1,..,n} (x_i-y_i)^2, \quad x,y \in \mathbb{R}^n$$
Use the two lists $[1,2,3,4,5,6,7,8,9]$ and $[9,8,7,6,5,4,3,2,1]$ in the expression.
End of explanation
"""
list(filter(lambda x: x%2 == 0, {1,2,3,4,5,6,7,8,9}))
"""
Explanation: The filter function
The filter function literally "filters" elements: it takes as input a predicate (that is, a boolean function returning True or False) and an iterable sequence of elements, and returns a filter object; iterating over it yields the sequence of elements for which the predicate holds (is True):
<p><center>`filter(function or None, iterable) --> filter object`</center></p>
The filter function is one of Python's builtins.
EXAMPLE 3: Filter the even numbers of a given list.
End of explanation
"""
# SOLVE THE EXERCISE
"""
Explanation: EXERCISE 2: Compute the square of the odd numbers of a given list. Hint: use both map and filter.
End of explanation
"""
from functools import reduce
reduce(lambda x,y: x+y, [1,2,3,4])
"""
Explanation: List comprehensions
Using list comprehension notation can sometimes make the code more compact and can shift the reader's focus from how the computation is done to what is being computed. Consider the following example:
collection = list()
for datum in data_set:
if condition(datum):
collection.append(datum)
else:
new = modify(datum)
collection.append(new)
which, using list comprehension notation, can be rewritten as:
collection = [d if condition(d) else modify(d) for d in data_set]
The reduce function
The reduce function literally "reduces" a sequence of elements to a scalar. In functional programming it is also called fold. The reduce function takes as input a function, called the "combining" function, an iterable sequence of elements and an (optional) initial value. It returns the value obtained by applying the given function to the elements of the list in sequence:
<p><center>`reduce(function, sequence[, initial]) -> value`</center></p>
For example, if the function f(x,y), the list [1,2,3,4] and the initial value 0 are passed as input, the reduce function computes the value:
$$v = f(f(f(f(0,1), 2), 3), 4)$$
If the given function is the sum, this is equivalent to computing:
$$((((0+1)+2)+3)+4) = 10$$
The reduce function is not a Python builtin and must be imported with the command:
from functools import reduce
EXAMPLE 4: Write the code for the previous example
End of explanation
"""
from math import sqrt
# SOLVE THE EXERCISE
"""
Explanation: EXERCISE 3: Write a function that computes the norm of a vector, $\sqrt{\sum_{i=1,..,n} x_i^2}$. Hint: the sqrt function belongs to the math library and must be imported with the command from math import sqrt.
End of explanation
"""
# First example of an "infinite" list
def Counter():
c = 1
while True:
yield c
c += 1
cnt = Counter()
print(type(cnt))
print([next(cnt) for _ in range(10)])
# Naive Python implementation of the builtin function `enumerate(Ls)`
def Enumerate(Ls):
i = 0
for l in Ls:
yield i,l
i += 1
print(Enumerate("CiaoBella"))
print(list(Enumerate("CiaoBella")))
"""
Explanation: 4. Lazy Evaluation and Infinite Lists
In the examples above, the map and filter functions returned lists that were processed in a LAZY fashion: that is, the i-th element of the list is not computed until it is actually needed for some computation. For this reason, we say the list is evaluated lazily. This is a very general concept, which in Python is implemented through objects called generators and iterators. The former are the objects that generate the lists, while the latter are the objects that allow such lists to be evaluated lazily, producing one element at a time only when it is actually requested. To define a function that generates an infinite list, the keyword yield is used instead of the usual return. To request one element at a time from an iterator, the builtin next() is used (see the example below).
Note that by exploiting LAZY evaluation of lists it is therefore possible to define lists of infinite length, which are evaluated only when one of their elements is actually needed for a computation.
EXAMPLES:
1. The function Counter(), which returns the infinite list of natural numbers starting from 1.
2. The function Enumerate(Ls), a naive version of the builtin enumerate, which returns the pairs (i,l) where l is the i-th element of the list Ls.
End of explanation
"""
# Third example of an infinite list
def NumeriPrimi():
    # TO BE COMPLETED
crivello = NumeriPrimi()
print([next(crivello) for _ in range(10)])
"""
Explanation: EXERCISE 4: Implement a function that returns the infinite list of prime numbers.
End of explanation
"""
|
steinam/teacher | jup_notebooks/data-science-ipython-notebooks-master/deep-learning/deep-dream/dream.ipynb | mit | # imports and basic notebook setup
from cStringIO import StringIO
import numpy as np
import scipy.ndimage as nd
import PIL.Image
from IPython.display import clear_output, Image, display
from google.protobuf import text_format
import caffe
# If your GPU supports CUDA and Caffe was built with CUDA support,
# uncomment the following to run Caffe operations on the GPU.
# caffe.set_mode_gpu()
# caffe.set_device(0) # select GPU device if multiple devices exist
def showarray(a, fmt='jpeg'):
a = np.uint8(np.clip(a, 0, 255))
f = StringIO()
PIL.Image.fromarray(a).save(f, fmt)
display(Image(data=f.getvalue()))
"""
Explanation: Deep Dreams (with Caffe)
Credits: Forked from DeepDream by Google
This notebook demonstrates how to use the Caffe neural network framework to produce "dream" visuals shown in the Google Research blog post.
It'll be interesting to see what imagery people are able to generate using the described technique. If you post images to Google+, Facebook, or Twitter, be sure to tag them with #deepdream so other researchers can check them out too.
Dependencies
This notebook is designed to have as few dependencies as possible:
* Standard Python scientific stack: NumPy, SciPy, PIL, IPython. Those libraries can also be installed as a part of one of the scientific packages for Python, such as Anaconda or Canopy.
* Caffe deep learning framework (installation instructions).
* Google protobuf library that is used for Caffe model manipulation.
End of explanation
"""
model_path = '../caffe/models/bvlc_googlenet/' # substitute your path here
net_fn = model_path + 'deploy.prototxt'
param_fn = model_path + 'bvlc_googlenet.caffemodel'
# Patching model to be able to compute gradients.
# Note that you can also manually add "force_backward: true" line to "deploy.prototxt".
model = caffe.io.caffe_pb2.NetParameter()
text_format.Merge(open(net_fn).read(), model)
model.force_backward = True
open('tmp.prototxt', 'w').write(str(model))
net = caffe.Classifier('tmp.prototxt', param_fn,
mean = np.float32([104.0, 116.0, 122.0]), # ImageNet mean, training set dependent
channel_swap = (2,1,0)) # the reference model has channels in BGR order instead of RGB
# a couple of utility functions for converting to and from Caffe's input image layout
def preprocess(net, img):
return np.float32(np.rollaxis(img, 2)[::-1]) - net.transformer.mean['data']
def deprocess(net, img):
return np.dstack((img + net.transformer.mean['data'])[::-1])
"""
Explanation: Loading DNN model
In this notebook we are going to use a GoogLeNet model trained on the ImageNet dataset.
Feel free to experiment with other models from the Caffe Model Zoo. One particularly interesting model was trained on the MIT Places dataset. It produced many visuals from the original blog post.
End of explanation
"""
def objective_L2(dst):
dst.diff[:] = dst.data
def make_step(net, step_size=1.5, end='inception_4c/output',
jitter=32, clip=True, objective=objective_L2):
'''Basic gradient ascent step.'''
src = net.blobs['data'] # input image is stored in Net's 'data' blob
dst = net.blobs[end]
ox, oy = np.random.randint(-jitter, jitter+1, 2)
src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2) # apply jitter shift
net.forward(end=end)
objective(dst) # specify the optimization objective
net.backward(start=end)
g = src.diff[0]
# apply normalized ascent step to the input image
src.data[:] += step_size/np.abs(g).mean() * g
src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2) # unshift image
if clip:
bias = net.transformer.mean['data']
src.data[:] = np.clip(src.data, -bias, 255-bias)
"""
Explanation: Producing dreams
Making the "dream" images is very simple. Essentially it is just a gradient ascent process that tries to maximize the L2 norm of activations of a particular DNN layer. Here are a few simple tricks that we found useful for getting good images:
* offset image by a random jitter
* normalize the magnitude of gradient ascent steps
* apply ascent across multiple scales (octaves)
First we implement a basic gradient ascent step function, applying the first two tricks:
End of explanation
"""
def deepdream(net, base_img, iter_n=10, octave_n=4, octave_scale=1.4,
end='inception_4c/output', clip=True, **step_params):
# prepare base images for all octaves
octaves = [preprocess(net, base_img)]
for i in xrange(octave_n-1):
octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))
src = net.blobs['data']
detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
for octave, octave_base in enumerate(octaves[::-1]):
h, w = octave_base.shape[-2:]
if octave > 0:
# upscale details from the previous octave
h1, w1 = detail.shape[-2:]
detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)
src.reshape(1,3,h,w) # resize the network's input image size
src.data[0] = octave_base+detail
for i in xrange(iter_n):
make_step(net, end=end, clip=clip, **step_params)
# visualization
vis = deprocess(net, src.data[0])
if not clip: # adjust image contrast if clipping is disabled
vis = vis*(255.0/np.percentile(vis, 99.98))
showarray(vis)
print octave, i, end, vis.shape
clear_output(wait=True)
# extract details produced on the current octave
detail = src.data[0]-octave_base
# returning the resulting image
return deprocess(net, src.data[0])
"""
Explanation: Next we implement an ascent through different scales. We call these scales "octaves".
End of explanation
"""
img = np.float32(PIL.Image.open('sky1024px.jpg'))
showarray(img)
"""
Explanation: Now we are ready to let the neural network reveal its dreams! Let's take a cloud image as a starting point:
End of explanation
"""
_=deepdream(net, img)
"""
Explanation: Running the next code cell starts the detail generation process. You may see how new patterns start to form, iteration by iteration, octave by octave.
End of explanation
"""
_=deepdream(net, img, end='inception_3b/5x5_reduce')
"""
Explanation: The complexity of the details generated depends on which layer's activations we try to maximize. Higher layers produce complex features, while lower ones enhance edges and textures, giving the image an impressionist feeling:
End of explanation
"""
net.blobs.keys()
"""
Explanation: We encourage readers to experiment with layer selection to see how it affects the results. Execute the next code cell to see the list of different layers. You can modify the make_step function to make it follow some different objective, say to select a subset of activations to maximize, or to maximize multiple layers at once. There is a huge design space to explore!
End of explanation
"""
!mkdir frames
frame = img
frame_i = 0
h, w = frame.shape[:2]
s = 0.05 # scale coefficient
for i in xrange(100):
frame = deepdream(net, frame)
PIL.Image.fromarray(np.uint8(frame)).save("frames/%04d.jpg"%frame_i)
frame = nd.affine_transform(frame, [1-s,1-s,1], [h*s/2,w*s/2,0], order=1)
frame_i += 1
"""
Explanation: What if we feed the deepdream function its own output, after applying a little zoom to it? It turns out that this leads to an endless stream of impressions of the things that the network saw during training. Some patterns fire more often than others, suggestive of basins of attraction.
We will start the process from the same sky image as above, but after some iteration the original image becomes irrelevant; even random noise can be used as the starting point.
End of explanation
"""
Image(filename='frames/0029.jpg')
"""
Explanation: Be careful running the code above, it can bring you into very strange realms!
End of explanation
"""
guide = np.float32(PIL.Image.open('flowers.jpg'))
showarray(guide)
"""
Explanation: Controlling dreams
The image detail generation method described above tends to produce some patterns more often than others. One easy way to improve the generated image diversity is to tweak the optimization objective. Here we show just one of many ways to do that. Let's use one more input image; we'll call it a "guide".
End of explanation
"""
end = 'inception_3b/output'
h, w = guide.shape[:2]
src, dst = net.blobs['data'], net.blobs[end]
src.reshape(1,3,h,w)
src.data[0] = preprocess(net, guide)
net.forward(end=end)
guide_features = dst.data[0].copy()
"""
Explanation: Note that the neural network we use was trained on images downscaled to 224x224 size. So high resolution images might have to be downscaled, so that the network could pick up their features. The image we use here is already small enough.
Now we pick some target layer and extract guide image features.
End of explanation
"""
def objective_guide(dst):
x = dst.data[0].copy()
y = guide_features
ch = x.shape[0]
x = x.reshape(ch,-1)
y = y.reshape(ch,-1)
A = x.T.dot(y) # compute the matrix of dot-products with guide features
dst.diff[0].reshape(ch,-1)[:] = y[:,A.argmax(1)] # select ones that match best
_=deepdream(net, img, end=end, objective=objective_guide)
"""
Explanation: Instead of maximizing the L2-norm of current image activations, we try to maximize the dot-products between activations of current image, and their best matching correspondences from the guide image.
End of explanation
"""
|
hglanz/phys202-2015-work | assignments/assignment03/NumpyEx04.ipynb | mit | import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
"""
Explanation: Numpy Exercise 4
Imports
End of explanation
"""
import networkx as nx
K_5=nx.complete_graph(5)
nx.draw(K_5)
"""
Explanation: Complete graph Laplacian
In discrete mathematics a Graph is a set of vertices or nodes that are connected to each other by edges or lines. If those edges don't have directionality, the graph is said to be undirected. Graphs are used to model social and communications networks (Twitter, Facebook, Internet) as well as natural systems such as molecules.
A Complete Graph, $K_n$ on $n$ nodes has an edge that connects each node to every other node.
Here is $K_5$:
End of explanation
"""
def complete_deg(n):
"""Return the integer valued degree matrix D for the complete graph K_n."""
D = np.diag([n-1 for i in range(n)])
return D
#raise NotImplementedError()
D = complete_deg(5)
assert D.shape==(5,5)
assert D.dtype==np.dtype(int)
assert np.all(D.diagonal()==4*np.ones(5))
assert np.all(D-np.diag(D.diagonal())==np.zeros((5,5),dtype=int))
"""
Explanation: The Laplacian Matrix is a matrix that is extremely important in graph theory and numerical analysis. It is defined as $L=D-A$, where $D$ is the degree matrix and $A$ is the adjacency matrix. For the purpose of this problem you don't need to understand the details of these matrices, although their definitions are relatively simple.
The degree matrix for $K_n$ is an $n \times n$ diagonal matrix with the value $n-1$ along the diagonal and zeros everywhere else. Write a function to compute the degree matrix for $K_n$ using NumPy.
End of explanation
"""
def complete_adj(n):
"""Return the integer valued adjacency matrix A for the complete graph K_n."""
D = np.ones((n,n), dtype = int) - np.diag([1 for i in range(n)])
return D
#raise NotImplementedError()
A = complete_adj(5)
assert A.shape==(5,5)
assert A.dtype==np.dtype(int)
assert np.all(A+np.eye(5,dtype=int)==np.ones((5,5),dtype=int))
"""
Explanation: The adjacency matrix for $K_n$ is an $n \times n$ matrix with zeros along the diagonal and ones everywhere else. Write a function to compute the adjacency matrix for $K_n$ using NumPy.
End of explanation
"""
def spectrum(ns, printq=False, graph=False):
    for n in ns:
        L = complete_deg(n) - complete_adj(n)
        eigs = np.linalg.eigvals(L)
        if printq:
            print("Complete Graph with %s vertices (eigenvalues):" % n)
            print(eigs)
            print("\n")
        if graph:
            # plot the sorted eigenvalues so the pattern across n is visible
            plt.plot(np.sort(eigs.real), 'o', label="$K_{%d}$" % n)
    if graph:
        plt.legend()
        plt.show()
spectrum(range(3,11), True)
#raise NotImplementedError()
"""
Explanation: Use NumPy to explore the eigenvalues or spectrum of the Laplacian L of $K_n$. What patterns do you notice as $n$ changes? Create a conjecture about the general Laplace spectrum of $K_n$.
End of explanation
"""
|
bendichter/tenseflow | change_tenses.ipynb | mit | sentences = parse(text).split()
[x for x in sentences[0] if x[0] == 'thought']
"""
Explanation: Here 'thought' was not changed. Let's check if it was labeled as a noun.
End of explanation
"""
nlp = English()
doc=nlp(text)
[x for x in list(doc.sents)[0] if x.text == 'thought'][0].tag_
"""
Explanation: Yup, it's labeled as a noun phrase (NP). Let's try the spaCy parser.
End of explanation
"""
def change_tense_spaCy(text, to_tense):
doc = nlp(unicode(text))
out = []
out.append(doc[0].text)
for word_pair in pairwise(doc):
if (word_pair[0].string == 'will' and word_pair[1].pos_ == u'VERB') \
or word_pair[1].tag_ == u'VBD' or word_pair[1].tag_ == u'VBP':
if to_tense == 'present':
out.append(conjugate(word_pair[1].text, PRESENT))
elif to_tense == 'past':
out.append(conjugate(word_pair[1].text, PAST))
elif to_tense == 'future':
out.append('will')
out.append(conjugate(word_pair[1].text, 'inf'))
elif word_pair[1].text == 'will' and word_pair[1].tag_ == 'MD':
pass
else:
out.append(word_pair[1].text)
text_out = ' '.join(out)
for char in string.punctuation:
if char in """(<['‘""":
text_out = text_out.replace(char+' ',char)
else:
text_out = text_out.replace(' '+char,char)
text_out = text_out.replace(" 's","'s") #fix posessive 's
return text_out
print(change_tense_spaCy(text, 'present'))
print(change_tense_spaCy(text,"future"))
"""
Explanation: Well that's good, spaCy got it right! Let's build the same parser, but using spaCy instead of pattern.
End of explanation
"""
text = "White rabbits with pink eyes ran close by her."
change_tense_spaCy(text, 'present')
"""
Explanation: Looking good! However, it will fail if we make the following change to the last sentence:
End of explanation
"""
from spacy.symbols import NOUN
SUBJ_DEPS = {'agent', 'csubj', 'csubjpass', 'expl', 'nsubj', 'nsubjpass'}
def _get_conjuncts(tok):
"""
Return conjunct dependents of the leftmost conjunct in a coordinated phrase,
e.g. "Burton, [Dan], and [Josh] ...".
"""
return [right for right in tok.rights
if right.dep_ == 'conj']
def is_plural_noun(token):
"""
Returns True if token is a plural noun, False otherwise.
Args:
token (``spacy.Token``): parent document must have POS information
Returns:
bool
"""
if token.doc.is_tagged is False:
raise ValueError('token is not POS-tagged')
return True if token.pos == NOUN and token.lemma != token.lower else False
def get_subjects_of_verb(verb):
"""Return all subjects of a verb according to the dependency parse."""
subjs = [tok for tok in verb.lefts
if tok.dep_ in SUBJ_DEPS]
# get additional conjunct subjects
subjs.extend(tok for subj in subjs for tok in _get_conjuncts(subj))
return subjs
def is_plural_verb(token):
if token.doc.is_tagged is False:
raise ValueError('token is not POS-tagged')
subjects = get_subjects_of_verb(token)
plural_score = sum([is_plural_noun(x) for x in subjects])/len(subjects)
return plural_score > .5
conjugate??
def change_tense_spaCy(text, to_tense):
doc = nlp(unicode(text))
out = []
out.append(doc[0].text)
for word_pair in pairwise(doc):
if (word_pair[0].string == 'will' and word_pair[1].pos_ == u'VERB') \
or word_pair[1].tag_ == u'VBD' or word_pair[1].tag_ == u'VBP':
if to_tense == 'present':
if is_plural_verb(word_pair[1]):
out.append(conjugate(word_pair[1].text, PRESENT, None, PLURAL))
else:
out.append(conjugate(word_pair[1].text, PRESENT))
elif to_tense == 'past':
out.append(conjugate(word_pair[1].text, PAST))
elif to_tense == 'future':
out.append('will')
out.append(conjugate(word_pair[1].text, 'inf'))
elif word_pair[1].text == 'will' and word_pair[1].tag_ == 'MD':
pass
else:
out.append(word_pair[1].text)
text_out = ' '.join(out)
for char in string.punctuation:
if char in """(<['‘""":
text_out = text_out.replace(char+' ',char)
else:
text_out = text_out.replace(' '+char,char)
text_out = text_out.replace(" 's","'s") #fix posessive 's
return text_out
text_plural_check = "Rabbits with white fur ran close by her."
change_tense_spaCy(text_plural_check, 'present')
nlp = English()
sent = u"I was shooting an elephant"
doc=nlp(sent)
sub_toks = [tok for tok in doc if (tok.dep_ == "nsubj") ]
print(sub_toks)
# Finding a verb with a subject from below — good
verbs = set()
for possible_subject in doc:
if possible_subject.dep == nsubj and possible_subject.head.pos == VERB:
verbs.add((possible_subject, possible_subject.head))
verbs
text2 = "We will see about that"
sentences = parse(text2).split()
sentences
pprint(parsetree("I walk to the store"))
pairwise(sentences[0])[0]
parse("I will walk").split()
text2 = """Dr. Dichter's interest in community psychiatry began as a fourth year resident when he and a co-resident ran a psychiatric inpatient and outpatient program at Fort McCoy Wisconsin treating formally institutionalized chronically mentally ill Cuban refugees from the Mariel Boatlift. He came to Philadelphia to provide short-term inpatient treatment, alleviating emergency room congestion. There he first encountered the problems of homelessness and was particularly interested in the relationship between the homeless and their families. Dr. Dichter has been the Director of an outpatient department and inpatient unit, as well as the Director of Family Therapy at AEMC. His work with families focused on the impact of chronic mental illness on the family system. He was the first Medical Director for a Medicaid Managed Care Organization and has consulted with SAMHSA, CMS and several states assisting them to monitor access and quality of care for their public patients. He currently is the Medical Director for Pathways to Housing PA, where he has assists chronically homeless to maintain stable housing and recover from the ravages of mental illness and substance abuse."""
text2
change_tense_spaCy(text2,'future')
s = parsetree(text2,relations=True)[0]
' '.join([chunk.string for chunk in s.chunks])
s.string
conjugate('focussed','inf',parse=False)
tenses('focused')
from stat_parser import Parser
parser = Parser()
text = "He came to Philadelphia to provide short-term inpatient treatment, alleviating emergency room congestion."
text = "I will be there."
result = parser.parse(text)
result
sentence = result
LABELS = [x._label for x in sentence[0]]
vps = [x for x in sentence[0] if x._label == 'VP']
#verbs = x for x in vps
WORDS,POS = zip(*result.pos())
vps[0].pos()
vps[0]
doc
#fix formatting
import string
##TODO: fix spacing around single and double quotes
"""
Explanation: This fails because the verb "ran" conjugates to "runs" if the subject is singular, but conjugates to "run" if the subject is plural. To fix this, we'll have to figure out a way to tell the verb the number of its subject.
End of explanation
"""
|
davidrpugh/pyCollocation | examples/solow-model.ipynb | mit | def cobb_douglas_output(k, alpha, **params):
return k**alpha
"""
Explanation: <h2>Example: Solow model with Cobb-Douglas production</h2>
The Solow model is a model of economic growth as a process of physical capital accumulation. By far the most common version of the Solow model assumes Cobb-Douglas functional form for intensive output:
$$ f(k) = k^{\alpha}. $$
End of explanation
"""
def standard_solow_model(t, k, alpha, delta, g, n, s, **params):
return [s * cobb_douglas_output(k, alpha) - (g + n + delta) * k]
def initial_condition(t, k, k0, **params):
return [k - k0]
"""
Explanation: After a bit of algebra, the Solow model with Cobb-Douglas production can be reduced down to a single non-linear ordinary differential equation (ODE) and an initial condition for capital (per unit effective labor supply)...
$$ \dot{k}(t) = s k(t)^{\alpha} - (g + n + \delta) k(t),\ k(0) = k_0 $$
...the above equation says that the rate of change of the stock of physical capital (per unit effective labor supply), $\dot{k}(t)$, is the difference between the actual level of investment in physical capital, $sk(t)^{\alpha}$, and the amount of investment required to maintain the current level of physical capital, $(g + n + \delta) k(t)$.
End of explanation
"""
params = {'g': 0.02, 's': 0.1, 'n': 0.02, 'alpha': 0.15, 'delta': 0.04, 'k0': 1.0}
"""
Explanation: To complete the model we need to define some parameter values.
End of explanation
"""
pycollocation.problems.IVP?
standard_solow_ivp = pycollocation.problems.IVP(bcs_lower=initial_condition,
number_bcs_lower=1,
number_odes=1,
params=params,
rhs=standard_solow_model,
)
"""
Explanation: <h2>Solving the model with pyCollocation</h2>
<h3>Defining a `pycollocation.IVP` instance</h3>
End of explanation
"""
def equilibrium_capital(alpha, delta, g, n, s, **params):
"""Equilibrium value of capital (per unit effective labor supply)."""
return (s / (g + n + delta))**(1 / (1 - alpha))
def initial_mesh(t, T, num, problem):
ts = np.linspace(t, T, num=num)
kstar = equilibrium_capital(**problem.params)
ks = kstar - (kstar - params['k0']) * np.exp(-ts)
return ts, ks
"""
Explanation: Finding a good initial guess for $k(t)$
Theory tells us that, starting from some initial condition $k_0$, the solution to the Solow model converges monotonically toward its long run equilibrium value $k^*$. Our initial guess for the solution should preserve this property...
End of explanation
"""
pycollocation.solvers.Solver?
"""
Explanation: Solving the model
End of explanation
"""
# Choose your basis functions and create a solver
polynomial_basis = pycollocation.basis_functions.PolynomialBasis()
solver = pycollocation.solvers.Solver(polynomial_basis)
# compute the initial mesh
boundary_points = (0, 100)
ts, ks = initial_mesh(*boundary_points, num=1000, problem=standard_solow_ivp)
# compute the initial coefs guess
basis_kwargs = {'kind': 'Chebyshev', 'domain': boundary_points, 'degree': 15}
k_poly = polynomial_basis.fit(ts, ks, **basis_kwargs)
initial_coefs = k_poly.coef
# specify the collocation nodes
nodes = polynomial_basis.roots(**basis_kwargs)
# solve the model!
solution = solver.solve(basis_kwargs, boundary_points, initial_coefs,
nodes, standard_solow_ivp)
k_soln, = solution.evaluate_solution(ts)
plt.plot(ts, k_soln)
plt.show()
k_resids, = solution.evaluate_residual(ts)
plt.plot(ts, k_resids)
plt.show()
k_normalized_resids, = solution.normalize_residuals(ts)
plt.plot(ts, np.abs(k_normalized_resids))
plt.yscale('log')
plt.show()
"""
Explanation: <h3>Orthogonal polynomial basis functions</h3>
End of explanation
"""
bspline_basis = pycollocation.basis_functions.BSplineBasis()
solver = pycollocation.solvers.Solver(bspline_basis)
ts, ks = initial_mesh(*boundary_points, num=250, problem=standard_solow_ivp)
tck, u = bspline_basis.fit([ks], u=ts, k=5, s=0)
knots, coefs, k = tck
initial_coefs = np.hstack(coefs)
basis_kwargs = {'knots': knots, 'degree': k, 'ext': 2}
nodes = np.linspace(*boundary_points, num=249)
solution = solver.solve(basis_kwargs, boundary_points, initial_coefs,
nodes, standard_solow_ivp)
solution.result.success
k_soln, = solution.evaluate_solution(ts)
plt.plot(ts, k_soln)
plt.show()
k_resids, = solution.evaluate_residual(ts)
plt.plot(ts, k_resids)
plt.show()
k_normalized_resids, = solution.normalize_residuals(ts)
plt.plot(ts, np.abs(k_normalized_resids))
plt.yscale('log')
plt.show()
"""
Explanation: <h3>B-spline basis functions</h3>
End of explanation
"""
from pycollocation.tests import models
"""
Explanation: <h1> Example: Generic Solow model of economic growth</h1>
Can we refactor the code above so that it can solve a Solow model for arbitrary intensive production $f$? Yes!
End of explanation
"""
def ces_output(k, alpha, sigma, **params):
rho = (sigma - 1) / sigma
if rho == 0:
y = cobb_douglas_output(k, alpha)
else:
y = (alpha * k**rho + (1 - alpha))**(1 / rho)
return y
def ces_equilibrium_capital(g, n, s, alpha, delta, sigma, **params):
"""Steady state value for capital stock (per unit effective labor)."""
rho = (sigma - 1) / sigma
if rho == 0:
kss = (s / (g + n + delta))**(1 / (1 - alpha))
else:
kss = ((1 - alpha) / (((g + n + delta) / s)**rho - alpha))**(1 / rho)
return kss
ces_params = {'g': 0.02, 's': 0.1, 'n': 0.02, 'alpha': 0.15, 'delta': 0.04,
'sigma': 0.05, 'k0': 1.0}
generic_solow_ivp = models.SolowModel(ces_output,
ces_equilibrium_capital,
ces_params)
polynomial_basis = pycollocation.basis_functions.PolynomialBasis()
solver = pycollocation.solvers.Solver(polynomial_basis)
basis_kwargs = {'kind': 'Chebyshev', 'domain': [0, 100], 'degree': 15}
boundary_points = basis_kwargs['domain']
ts, ks = initial_mesh(*boundary_points, num=1000, problem=generic_solow_ivp)
k_poly = polynomial_basis.fit(ts, ks, **basis_kwargs)
initial_coefs = k_poly.coef
nodes = polynomial_basis.roots(**basis_kwargs)
solution = solver.solve(basis_kwargs, boundary_points, initial_coefs,
                        nodes, generic_solow_ivp)
k_soln, = solution.evaluate_solution(ts)
plt.plot(ts, k_soln)
plt.show()
k_normalized_resids, = solution.normalize_residuals(ts)
plt.plot(ts, np.abs(k_normalized_resids))
plt.yscale('log')
plt.show()
"""
Explanation: Example usage...
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/awi/cmip6/models/sandbox-2/seaice.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'awi', 'sandbox-2', 'seaice')
"""
Explanation: ES-DOC CMIP6 Model Properties - Seaice
MIP Era: CMIP6
Institute: AWI
Source ID: SANDBOX-2
Topic: Seaice
Sub-Topics: Dynamics, Thermodynamics, Radiative Processes.
Properties: 80 (63 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:38
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Model
2. Key Properties --> Variables
3. Key Properties --> Seawater Properties
4. Key Properties --> Resolution
5. Key Properties --> Tuning Applied
6. Key Properties --> Key Parameter Values
7. Key Properties --> Assumptions
8. Key Properties --> Conservation
9. Grid --> Discretisation --> Horizontal
10. Grid --> Discretisation --> Vertical
11. Grid --> Seaice Categories
12. Grid --> Snow On Seaice
13. Dynamics
14. Thermodynamics --> Energy
15. Thermodynamics --> Mass
16. Thermodynamics --> Salt
17. Thermodynamics --> Salt --> Mass Transport
18. Thermodynamics --> Salt --> Thermodynamics
19. Thermodynamics --> Ice Thickness Distribution
20. Thermodynamics --> Ice Floe Size Distribution
21. Thermodynamics --> Melt Ponds
22. Thermodynamics --> Snow Processes
23. Radiative Processes
1. Key Properties --> Model
Name of seaice model used.
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of sea ice model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Variables
List of prognostic variable in the sea ice model.
2.1. Prognostic
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the sea ice component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Seawater Properties
Properties of seawater relevant to sea ice
3.1. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Ocean Freezing Point Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant seawater freezing point, specify this value.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Resolution
Resolution of the sea ice grid
4.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning applied to sea ice model component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Target
Is Required: TRUE Type: STRING Cardinality: 1.1
What was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Simulations
Is Required: TRUE Type: STRING Cardinality: 1.1
*Which simulations had tuning applied, e.g. all, not historical, only pi-control? *
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Metrics Used
Is Required: TRUE Type: STRING Cardinality: 1.1
List any observed metrics used in tuning model/parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.5. Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Which variables were changed during the tuning process?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Key Parameter Values
Values of key parameters
6.1. Typical Parameters
Is Required: FALSE Type: ENUM Cardinality: 0.N
What values were specified for the following parameters if used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Additional Parameters
Is Required: FALSE Type: STRING Cardinality: 0.N
If you have any additional parameterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Assumptions
Assumptions made in the sea ice model
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.N
General overview description of any key assumptions made in this model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. On Diagnostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
Note any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Missing Processes
Is Required: TRUE Type: STRING Cardinality: 1.N
List any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation
Conservation in the sea ice component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Provide a general description of conservation methodology.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Properties
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in sea ice by the numerical schemes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
For each conserved property, specify the output variables which close the related budgets, as a comma separated list. For example: Conserved property, variable1, variable2, variable3
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.4. Was Flux Correction Used
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does conservation involve flux correction?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Corrected Conserved Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List any variables which are conserved by more than the numerical scheme alone.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Grid --> Discretisation --> Horizontal
Sea ice discretisation in the horizontal
9.1. Grid
Is Required: TRUE Type: ENUM Cardinality: 1.1
Grid on which sea ice is horizontally discretised?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.2. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the type of sea ice grid?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the advection scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.4. Thermodynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step in the sea ice model thermodynamic component in seconds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.5. Dynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step in the sea ice model dynamic component in seconds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional horizontal discretisation details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Grid --> Discretisation --> Vertical
Sea ice vertical properties
10.1. Layering
Is Required: TRUE Type: ENUM Cardinality: 1.N
What type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.2. Number Of Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using multi-layers specify how many.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional vertical grid details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 11. Grid --> Seaice Categories
What method is used to represent sea ice categories?
11.1. Has Mulitple Categories
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Set to true if the sea ice model has multiple sea ice categories.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Number Of Categories
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using sea ice categories specify how many.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Category Limits
Is Required: TRUE Type: STRING Cardinality: 1.1
If using sea ice categories specify each of the category limits.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Ice Thickness Distribution Scheme
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the sea ice thickness distribution scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Other
Is Required: FALSE Type: STRING Cardinality: 0.1
If the sea ice model does not use sea ice categories, specify any additional details. For example, models that parameterise the ice thickness distribution (ITD), i.e. there is no explicit ITD, but fluxes are computed according to an assumed distribution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Grid --> Snow On Seaice
Snow on sea ice details
12.1. Has Snow On Ice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow on ice represented in this model?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12.2. Number Of Snow Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels of snow on ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Snow Fraction
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how the snow fraction on sea ice is determined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.4. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional details related to snow on ice.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamics
Sea Ice Dynamics
13.1. Horizontal Transport
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of horizontal advection of sea ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Transport In Thickness Space
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice transport in thickness space (i.e. in thickness categories)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Ice Strength Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Which method of sea ice strength formulation is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Redistribution
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which processes can redistribute sea ice (including thickness)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Rheology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Rheology, what is the ice deformation formulation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Thermodynamics --> Energy
Processes related to energy in sea ice thermodynamics
14.1. Enthalpy Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the energy formulation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.2. Thermal Conductivity
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of thermal conductivity is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.3. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of heat diffusion?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.4. Basal Heat Flux
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method by which basal ocean heat flux is handled?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.5. Fixed Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.6. Heat Content Of Precipitation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which the heat content of precipitation is handled.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.7. Precipitation Effects On Salinity
Is Required: FALSE Type: STRING Cardinality: 0.1
If precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Thermodynamics --> Mass
Processes related to mass in sea ice thermodynamics
15.1. New Ice Formation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which new sea ice is formed in open water.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Ice Vertical Growth And Melt
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs the vertical growth and melt of sea ice.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Ice Lateral Melting
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice lateral melting?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.4. Ice Surface Sublimation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs sea ice surface sublimation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.5. Frazil Ice
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of frazil ice formation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 16. Thermodynamics --> Salt
Processes related to salt in sea ice thermodynamics.
16.1. Has Multiple Sea Ice Salinities
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 16.2. Sea Ice Salinity Thermal Impacts
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does sea ice salinity impact the thermal properties of sea ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Thermodynamics --> Salt --> Mass Transport
Mass transport of salt
17.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the mass transport of salt calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value specify this value in PSU?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Thermodynamics --> Salt --> Thermodynamics
Salt thermodynamics
18.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the thermodynamic calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value specify this value in PSU?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Thermodynamics --> Ice Thickness Distribution
Ice thickness distribution details.
19.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice thickness distribution represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Thermodynamics --> Ice Floe Size Distribution
Ice floe-size distribution details.
20.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice floe-size represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Please provide further details on any parameterisation of floe-size.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 21. Thermodynamics --> Melt Ponds
Characteristics of melt ponds.
21.1. Are Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are melt ponds included in the sea ice model?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21.2. Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What method of melt pond formulation is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21.3. Impacts
Is Required: TRUE Type: ENUM Cardinality: 1.N
What do melt ponds have an impact on?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22. Thermodynamics --> Snow Processes
Thermodynamic processes in snow on sea ice
22.1. Has Snow Aging
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has a snow aging scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Snow Aging Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow aging scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22.3. Has Snow Ice Formation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has snow ice formation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.4. Snow Ice Formation Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow ice formation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.5. Redistribution
Is Required: TRUE Type: STRING Cardinality: 1.1
What is the impact of ridging on snow cover?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.6. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the heat diffusion through snow methodology in sea ice thermodynamics?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiative Processes
Sea Ice Radiative Processes
23.1. Surface Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used to handle surface albedo.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Ice Radiation Transmission
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method by which solar radiation through sea ice is handled.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/cnrm-cerfacs/cmip6/models/sandbox-1/landice.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cnrm-cerfacs', 'sandbox-1', 'landice')
"""
Explanation: ES-DOC CMIP6 Model Properties - Landice
MIP Era: CMIP6
Institute: CNRM-CERFACS
Source ID: SANDBOX-1
Topic: Landice
Sub-Topics: Glaciers, Ice.
Properties: 30 (21 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:52
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Grid
4. Glaciers
5. Ice
6. Ice --> Mass Balance
7. Ice --> Mass Balance --> Basal
8. Ice --> Mass Balance --> Frontal
9. Ice --> Dynamics
1. Key Properties
Land ice key properties
1.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Ice Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify how ice albedo is modelled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Atmospheric Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the atmosphere and ice (e.g. orography, ice mass)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Oceanic Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the ocean and ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which variables are prognostically calculated in the ice model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of land ice code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Grid
Land ice grid
3.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.2. Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is an adaptive grid being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Base Resolution
Is Required: TRUE Type: FLOAT Cardinality: 1.1
The base resolution (in metres), before any adaptation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Resolution Limit
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If an adaptive grid is being used, what is the limit of the resolution (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.5. Projection
Is Required: TRUE Type: STRING Cardinality: 1.1
The projection of the land ice grid (e.g. albers_equal_area)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Glaciers
Land ice glaciers
4.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of glaciers in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of glaciers, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 4.3. Dynamic Areal Extent
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does the model include a dynamic glacial extent?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Ice
Ice sheet and ice shelf
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the ice sheet and ice shelf in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 5.2. Grounding Line Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the technique used for modelling the grounding line in the ice sheet-ice shelf coupling
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.3. Ice Sheet
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice sheets simulated?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.4. Ice Shelf
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice shelves simulated?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Ice --> Mass Balance
Description of the surface mass balance treatment
6.1. Surface Mass Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how and where the surface mass balance (SMB) is calculated. Include the temporal coupling frequency from the atmosphere, whether or not a separate SMB model is used, and if so details of this model, such as its resolution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Ice --> Mass Balance --> Basal
Description of basal melting
7.1. Bedrock
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over bedrock
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Ocean
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Ice --> Mass Balance --> Frontal
Description of calving/melting from the ice shelf front
8.1. Calving
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of calving from the front of the ice shelf
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Melting
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of melting from the front of the ice shelf
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Ice --> Dynamics
**
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of ice sheet and ice shelf dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.2. Approximation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Approximation type used in modelling ice dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.3. Adaptive Timestep
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there an adaptive time scheme for the ice scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.4. Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep (in seconds) of the ice scheme. If the timestep is adaptive, then state a representative timestep.
End of explanation
"""
|
paolorivas/homeworkfoundations | 11/Homework_11_Paolo_Rivas.ipynb | mit | import pandas as pd
import numpy as np
import datetime
import datetime as dt
dt.datetime.strptime('08/04/2013', '%m/%d/%Y')
datetime.datetime(2013, 8, 4, 0, 0)
parser = lambda date: pd.datetime.strptime(date, '%m/%d/%Y')
!head -n 10000 violations.csv > small-violations.csv
df = pd.read_csv("small-violations.csv", na_values= {'Vehicle Year': ['0']}, parse_dates=[4], date_parser=parser, dtype=str)
df.tail(20)
"""
Explanation: 1. I want to make sure my Plate ID is a string. Can't lose the leading zeroes!
2. I don't think anyone's car was built in 0AD. Discard the '0's as NaN.
3. I want the dates to be dates! Read the read_csv documentation to find out how to make pandas automatically parse dates.
End of explanation
"""
df['Date First Observed'].value_counts()
import dateutil.parser
def first_observed_function(x):
    try:
        x = str(x)
        if x == '0':
            print("NaN")
            return np.nan
        else:
            print("transforming...")
            date_clean = dateutil.parser.parse(x)
            return date_clean.strftime("%Y-%m-%d")
    except:
        return None
first_observed_function('20130731')
df['Clean Date First Observed']= df['Date First Observed'].apply(first_observed_function)
df['Clean Date First Observed'].value_counts()
"""
Explanation: 4. "Date first observed" is a pretty weird column, but it seems like it has a date hiding inside. Using a function with .apply, transform the string (e.g. "20140324") into a Python date. Make the 0's show up as NaN.
End of explanation
"""
df['Violation Time']
def violation_time_transformed(x):
try:
hour = x[0:2]
minutes = x[2:4]
pam= x[4]
time= hour + ":" + minutes + " " + pam + 'm'
changed_time = dateutil.parser.parse(time)
return changed_time.strftime("%H:%M%p")
except:
return None
df['New Violation Time']= df['Violation Time'].apply(violation_time_transformed)
df['New Violation Time'].head(20)
"""
Explanation: 5. Violation time" is... not a time. Make it a time
End of explanation
"""
df['Vehicle Color'].value_counts()
def color(color):
if (color == "BK") or (color == "BL"):
return 'BLACK'
if (color == "WHT") or (color == "WT") or (color == 'WH'):
return 'WHITE'
else:
return color
#example
color('BK'), color('WHT'), color('BL'), color('WT'), color('WH')
df['B&W Clean Vehicle Color'] = df['Vehicle Color'].apply(color)
df['B&W Clean Vehicle Color'].value_counts()
"""
Explanation: 6. There sure are a lot of colors of cars, too bad so many of them are the same. Make "BLK" and "BLACK", "WT" and "WHITE", and any other combinations that you notice.
End of explanation
"""
!head -n 10000 DOF_Parking_Violation_Codes.csv > small_DOF_Parking_Violation_Codes.csv
violations_data = pd.read_csv("small_DOF_Parking_Violation_Codes.csv")
violations_data.head(2)
type(violations_data['CODE'])
violations_data['CODE'].value_counts()
def transform_code(x):
try:
new_code = x[0:2]
return new_code
except:
return None
single_code = violations_data['CODE'].apply(transform_code)
violations_data['int CODE'] = single_code.astype(int)
violations_data['int CODE'].dtype #now is an integer
violations_data.head(129)
#I need to do this same process to the df['Violation Code'] because to transform it to a INT
old_df = df["Violation Code"].apply(transform_code)
df.head(10)
df['Violation Code 2'] = old_df.astype(int)
#Merging the two data sets
new_df= df.merge(violations_data, left_on="Violation Code 2", right_on="int CODE")
new_df.head(40)
"""
Explanation: 7. Join the data with the Parking Violations Code dataset from the NYC Open Data site
End of explanation
"""
new_df['All Other Areas'].value_counts()
new_df[ 'Manhattan\xa0 96th St. & below'].value_counts()
#First, I will transfrom all values into integers
def money_transformer(money_string):
if money_string == '200 (Heavy Tow plus violation fine)':
string_only = money_string[:3]
return int(string_only)
if money_string == '100\n(Regular Tow, plus violation fine)':
string_only = money_string[:3]
return int(string_only)
try:
return int(money_string.replace("$","").replace(",",""))
except:
return None
new_df['All Other Areas 2'] = new_df['All Other Areas'].apply(money_transformer)
new_df['Manhattan\xa0 96th St. & below 2'] = new_df['Manhattan\xa0 96th St. & below'].apply(money_transformer)
outcome1 = new_df['All Other Areas 2'].sum()
outcome2 = new_df['Manhattan\xa0 96th St. & below 2'].sum()
print("NYC makes between","$", outcome1, "US dollars and","$", outcome2, "US dollars of parking violations")
#PS. Data set has been cut to 10000 rows for memory saving reasons. Output would be considerably higher with the complete DF.
"""
Explanation: 8. How much money did NYC make off of parking violations?
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
new_df['Violation Code 2'].value_counts().head(10).plot.bar()
print("The most frequent is the infraction 21, followed by infraction 46 and 14")
print("this is how the top 3 are defined:")
new_df.groupby('CODE')['DEFINITION'].value_counts().sort_values(ascending=False).head(3)
#Looking for the most lucrative
violations_data.sort_values(by='CODE').head(46)
new_df['Violation Code 2'].value_counts().head(3)
#21 cost $65, 46 cost $115, 14 cost $115
def money_new(money_str):
if money_str == 1894:
return money_str * 65
if money_str == 1366:
return money_str * 115
if money_str == 987:
return money_str * 115
print("For al the 21 infractions the city has made", money_new(1894))
print("For al the 46 infractions the city has made", money_new(1366))
print("For al the 14 infractions the city has made", money_new(987))
print("Seems that infraction 46 is the most lucrative")
"""
Explanation: 9. What's the most lucrative kind of parking violation? The most frequent?
End of explanation
"""
new_df.groupby('Registration State')['All Other Areas 2'].sum().sort_values(ascending=False).head(10)
print('The city has made $274810 of the non newyorkers')
NY_fines= new_df.groupby('Registration State')['All Other Areas 2'].sum().sort_values(ascending=False).head(1)
outcome1 - NY_fines
"""
Explanation: 10. New Jersey has bad drivers, but does it have bad parkers, too? How much money does NYC make off of all non-New York vehicles?
End of explanation
"""
new_df['Registration State'].value_counts().sort_values().tail(10).plot.barh(color= 'Blue')
"""
Explanation: 11. Make a chart of the top few
End of explanation
"""
new_df['New Violation Time'].head(10)
def hour_transformer(x):
try:
time = int(x[:2])
if time <= 6:
return '12am-6am'
elif time <= 12:
return '6am-12pm'
elif time <= 18:
return '12pm-6pm'
elif time <= 24:
return '6pm-12am'
else:
pass
except:
pass
day_time = new_df['New Violation Time'].apply(hour_transformer)
day_time.value_counts().plot.pie(title='Ticket time!')
"""
Explanation: 12. What time of day do people usually get their tickets? You can break the day up into several blocks - for example 12am-6am, 6am-12pm, 12pm-6pm,6pm-12am.
End of explanation
"""
new_df['All Other Areas 2'].mean()
"""
Explanation: 13. What's the average ticket cost in NYC?
End of explanation
"""
new_df['Issue Date'].describe()
new_df.groupby('Issue Date')['Issue Date'].value_counts(sort=False).plot.bar(figsize=(15, 6))
plt.ylabel('Number of tickets')
plt.xlabel('Days')
#it seems like all the data is concentrated
#only in a few years
new_df.groupby('Issue Date')['All Other Areas 2'].sum().plot(kind="bar", figsize=(15, 6))
plt.ylabel('Amount in $')
plt.xlabel('Days')
"""
Explanation: 14. Make a graph of the number of tickets per day.
End of explanation
"""
#Still haven't figured out how :(
"""
Explanation: 16. Manually construct a dataframe out of https://dmv.ny.gov/statistic/2015licinforce-web.pdf (only the NYC boroughs - Bronx, Queens, Manhattan, Staten Island, Brooklyn), having columns for borough name, abbreviation, and number of licensed drivers.
End of explanation
"""
|
google/trax | trax/models/reformer/image_generation.ipynb | apache-2.0 | # Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2020 Google LLC.
End of explanation
"""
# Install JAX. This custom build raises the TPU timeout threshold, because the
# default limit of 2 minutes is too short for sampling very long sequences.
!gsutil cp gs://trax-ml/reformer/jaxlib-0.1.39-cp36-none-manylinux2010_x86_64.whl .
!gsutil cp gs://trax-ml/reformer/jax-0.1.59-cp36-none-manylinux2010_x86_64.whl .
!pip install --upgrade -q ./jaxlib-0.1.39-cp36-none-manylinux2010_x86_64.whl
!pip install --upgrade -q ./jax-0.1.59-cp36-none-manylinux2010_x86_64.whl
# Make sure the Colab Runtime is set to Accelerator: TPU.
import requests
import os
if 'TPU_DRIVER_MODE' not in globals():
url = 'http://' + os.environ['COLAB_TPU_ADDR'].split(':')[0] + ':8475/requestversion/tpu_driver0.1-dev20191206'
resp = requests.post(url)
TPU_DRIVER_MODE = 1
# The following is required to use TPU Driver as JAX's backend.
from jax.config import config
config.FLAGS.jax_xla_backend = "tpu_driver"
config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR']
print(config.FLAGS.jax_backend_target)
!pip install --upgrade -q gin git+https://github.com/google/[email protected]
from tensorflow.compat.v1.io.gfile import GFile
import gin
import os
import jax
import trax
from trax.models.beam_search import Search
from trax.supervised import inputs
import numpy as np
import jax.numpy as jnp
from scipy.special import softmax
%matplotlib inline
from matplotlib import pyplot as plt
"""
Explanation: Reformer: Image Generation
This notebook was designed to run on TPU.
To use TPUs in Colab, click "Runtime" on the main menu bar and select Change runtime type. Set "TPU" as the hardware accelerator.
End of explanation
"""
# Normally we train on the full imagenet64 training set, which is quite large so
# we won't be loading it from this notebook. Instead, let's just load a few PNG
# images to use in our data pipeline.
DATA = []
for i in range(8):
img = plt.imread(GFile('gs://trax-ml/reformer/img{}.png'.format(i), 'rb'))
# Convert from RGBA floating-point to RGB integer representation.
img = np.asarray(img[:, :, :3] * 255, dtype=np.int32)
DATA.append(img)
# We can examine one of the images to make sure we've loaded it correctly.
plt.figure(figsize=(1.5, 1.5))
plt.axis('off')
plt.imshow(DATA[0])
# We'll be using a pre-trained 12-layer Reformer model.
# First, load the config (which sets all needed hyperparameters).
!gsutil cp gs://trax-ml/reformer/imgnet64/config.gin ./config.gin
gin.parse_config_file('./config.gin')
# Now we construct a ReformerLM instance and load the pre-trained weights.
# The 'predict' mode configures the model to accept single tokens at a time,
# instead of feeding in a complete image all at once.
model_infer = trax.models.ReformerLM(mode='predict')
model_infer.init_from_file(
'gs://trax-ml/reformer/imgnet64/model.pkl', weights_only=True)
"""
Explanation: Load example data and model
End of explanation
"""
sampling_decoder = Search(
trax.models.ReformerLM,
model_infer.weights,
temperature=1.0,
max_decode_len=32*64*3,
)
"""
Explanation: Sample from the model
Now we're ready to sample from the pre-trained Reformer model. Unlike during training, sampling processes the images one pixel and channel value at a time. The TPU colab runtime has 8 cores so we can sample 8 images in parallel.
End of explanation
"""
flat_prompt = []
for i, img in enumerate(DATA[:trax.fastmath.device_count()]):
img = img.reshape((-1, 64, 3))[:32, :, :]
flat_prompt.append(img.reshape((-1,)))
prompt = np.stack(flat_prompt, 0)
print("Prompt:")
plt.figure(figsize=(10, 10*8))
for i in range(prompt.shape[0]):
plt.subplot(1, 8, i+1)
plt.axis('off')
plt.imshow(prompt[i].reshape((-1, 64, 3)), aspect='equal')
plt.show()
seqs, scores = sampling_decoder.decode(targets_prefix=prompt, batch_size=8)
print("Sampled completions:")
plt.figure(figsize=(10, 10*8))
for i in range(prompt.shape[0]):
plt.subplot(1, 8, i+1)
plt.axis('off')
plt.imshow(seqs[i, -1].reshape((-1, 64, 3)), aspect='equal')
plt.figure(figsize=(10, 10*8))
for i in range(prompt.shape[0]):
plt.subplot(1, 8, i+1)
plt.axis('off')
img = jnp.concatenate([prompt[i], seqs[i, -1]], -1)
plt.imshow(img.reshape((-1, 64, 3)), aspect='equal')
"""
Explanation: Sampling is an inherently serial process and will take up to 9 minutes to run. A good chunk of that time will be spent on JIT-compiling the code, though, so the code cell below will finish faster when re-run for a second time.
End of explanation
"""
|
LDSSA/learning-units | units/13-advanced-validation/problems/ign_problem.ipynb | mit | df = pd.read_csv("../data/ign.csv")
print(df.info())
df = df.drop('title', axis=1)
df = df.drop('url', axis=1)
df = df.drop('Unnamed: 0', axis=1)
df = df.dropna()
print(df.info())
print(df.head())
"""
Explanation: Check the data, deal with NaNs
End of explanation
"""
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
for col in df.columns.values:
#Encode only the categorical variables
if df[col].dtype=='object':
le.fit(df[col].values)
print("Encoded classes are: {}\n".format(le.classes_))
df[col]=le.transform(df[col])
print(df.head())
"""
Explanation: Encode parameters
End of explanation
"""
# Now it's your turn
"""
Explanation: Tips and objectives
Keep in mind that even though score_phrase would normally be the feature to predict (based on the genre of the game, the score, which may even correlate directly, the release year, etc.), it might be more interesting to try to use another feature as the label. Just use something that makes sense :)
If needed, feel free to apply the knowledge you have already gathered to make changes to the dataset.
The goal of this exercise is to:
Choose at least three models and use simple cross-validation. Which of the models would you implement?
Hold out different percentages of data and see how that affects the results.
Using the same three models, use k-fold cross-validation. Which one gives the best result?
Try different values of k. How does that affect the results? Try to justify.
Is it a good idea to use leave-one-out cross-validation on this dataset?
Use random splitting. How does this affect the results?
Implement any type of preprocessing in cross-validation using pipeline. Think about how to do this without using this method (you don't need to implement it).
It's a good idea to use a random_state equal to some integer in order to replicate results.
Remember, the goal is to get acquainted with these kinds of procedures. Don't stress too much about high scores. If you remember anything else you would like to try, feel free to implement it! (A minimal sketch of the first objectives follows below for reference.)
Implementations
End of explanation
"""
|
ilivans/information-retrieval | 07_duplicates/simhash.ipynb | mit | %%time
with open("simhash_sorted.txt") as f:
simhashes = [int(line[:-1]) for line in f.readlines()]
simhashes = np.array(simhashes, dtype=np.uint64) # found out before that simhash fits uint64
SIMHASH_SIZE = 64
num_samples = len(simhashes)
print "Number of samples:", num_samples
print "SimHash example:", format(simhashes[0], "b")
print "SimHash size:", SIMHASH_SIZE
"""
Explanation: Read in the simhashes.
End of explanation
"""
MAX_DISTANCE = 3
NUM_PARTS = MAX_DISTANCE + 1
PART_SIZE = SIMHASH_SIZE / NUM_PARTS
neg_part_mask = "0" * PART_SIZE
pos_part_mask = "1" * PART_SIZE
masks = [neg_part_mask * part_id + pos_part_mask + neg_part_mask * (NUM_PARTS - part_id - 1)\
for part_id in range(NUM_PARTS)]
masks = np.array([int(mask, 2) for mask in masks], dtype=np.uint64)
def get_part(simhash, part_id):
return int(simhash & masks[part_id]) >> (PART_SIZE * (NUM_PARTS - part_id - 1))
%%time
simhashes_parts = np.zeros((len(simhashes), NUM_PARTS), dtype=np.int32)
for simhash_id, simhash in enumerate(simhashes):
for part_id in xrange(NUM_PARTS):
simhashes_parts[simhash_id][part_id] = get_part(simhash, part_id)
"""
Explanation: Split the simhashes into 4 parts for indexing. If two 64-bit hashes differ in at most 3 bits, at least one of the 4 parts must be identical (pigeonhole principle), so near-duplicates can be found by bucketing on each part.
End of explanation
"""
%%time
indices = [[list() for __ in xrange(2 ** PART_SIZE)] for _ in xrange(NUM_PARTS)]
for simhash_id in xrange(num_samples):
simhash_parts = simhashes_parts[simhash_id]
for part_id in xrange(NUM_PARTS):
indices[part_id][simhash_parts[part_id]].append(simhash_id)
"""
Explanation: Build the indices.
End of explanation
"""
def ones_positions(num_ones, size=SIMHASH_SIZE):
if num_ones == 0:
yield []
return
for position in range(size):
for positions in ones_positions(num_ones - 1, size):
yield [position] + positions
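# accepted_xors enumerates every 64-bit mask with at most MAX_DISTANCE bits set
# (1 + 64 + 2016 + 41664 = 43745 masks for distance 3), so testing (h1 ^ h2) in accepted_xors
# is equivalent to checking that the Hamming distance between h1 and h2 is <= 3.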
accepted_xors = set()
for num_ones in xrange(MAX_DISTANCE + 1):
for positions in ones_positions(num_ones):
xor = ["0"] * SIMHASH_SIZE
for pos in positions:
xor[pos] = "1"
accepted_xors.add(np.uint64(int("".join(xor), 2)))
print len(accepted_xors)
def similar(hash1, hash2):
return (hash1 ^ hash2) in accepted_xors
%%time
groups_sizes = []
assigned = [False] * num_samples # indicators of simhashes assigned to any of the considered groups
num_assigned = 0
start = time()
for simhash_id, simhash in enumerate(simhashes):
if assigned[simhash_id]:
continue
group_size = 0
simhash_parts = simhashes_parts[simhash_id]
for part_id, part in enumerate(simhash_parts):
for candidate_id in indices[part_id][part]:
if assigned[candidate_id]:
continue
if similar(simhash, simhashes[candidate_id]):
group_size += 1
assigned[candidate_id] = True #.add(candidate_id)
num_assigned += 1
groups_sizes.append(group_size)
if simhash_id % 3000 == 0:
spent = time() - start
clear_output()
print "assigned: {}\tRemained time: {:.2f} days".format(
num_assigned,(float(num_samples) / num_assigned - 1) * spent / 60 / 60 / 24)
"""
Explanation: Cluster the hashes.
End of explanation
"""
groups_sizes = np.array(groups_sizes)
plt.figure(figsize=(12,8))
plt.plot(groups_sizes);
plt.xlabel("Group ID")
plt.ylabel("Group size");
"""
Explanation: After running for 6 hours, the script was projected to take about 9 more days, which is too long. So we analyse the data collected so far as a representative sample.
End of explanation
"""
plt.figure(figsize=(12,8))
plt.hist(groups_sizes, bins=100, log=True)
plt.xlabel("Group size")
plt.ylabel("Number of groups");
"""
Explanation: As we can see, there is no trend in group size over time.
End of explanation
"""
|
barjacks/swiss-asylum-judges | Analysing 30000 Verdicts.ipynb | mit | import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import glob
plt.style.use('ggplot')
import dateutil.parser
import re
import time
from collections import Counter
%matplotlib inline
"""
Explanation: Analysing the Textfiles
End of explanation
"""
whole_list_of_names = []
for name in glob.glob('txtfiles/*'):
name = name.split('/')[-1]
whole_list_of_names.append(name)
len(whole_list_of_names)
"""
Explanation: Preparing list of file names to iterate through later
End of explanation
"""
def extracting_aktennummer_german(doc):
try:
entscheid = re.search(r'Abteilung [A-Z]+\n[A-Z]-[0-9]+/[0-9]+', doc)
entscheid = entscheid.group()
entscheid = entscheid.replace('\n', '')
return entscheid
except:
None
def extracting_aktennummer_french(doc):
try:
entscheid = re.search(r'Cour [A-Z]+\n[A-Z]-[0-9]+/[0-9]+', doc)
entscheid = entscheid.group()
entscheid = entscheid.replace('\n', '')
return entscheid
except:
None
def extracting_aktennummer_italian(doc):
try:
entscheid = re.search(r'Corte [A-Z]+\n[A-Z]-[0-9]+/[0-9]+', doc)
entscheid = entscheid.group()
entscheid = entscheid.replace('\n', '')
return entscheid
except:
None
"""
Explanation: In the online database there are more cases listed than we analysed. This is due to duplicates in the database and merged cases. We used 'fdupes -dN .' on the command line to locate and remove more than 300 duplicate files.
Developing Regular Expressions
Aktennummer
Every expression is developed three times, as the verdicts are in three languages, French, German and Italian.
End of explanation
"""
def extracting_entscheid_italian(doc):
try:
doc = doc.replace('\n1. \n', '')
doc = doc.replace('\n1. \n1.', '')
doc = doc.replace('\n1.\n1.', '')
entscheid = re.findall(r'Tribunale amministrativo federale pronuncia:\n.*([^.]*)', doc)
entscheid = entscheid[0].replace('Oggetto', '').replace('\n', '').strip()
entscheid = entscheid[:150]
return entscheid
except:
None
def extracting_entscheid_french(doc):
try:
doc = doc.replace('\n1. \n', '')
doc = doc.replace('\n1. \n1.', '')
doc = doc.replace('\n1.\n1.', '')
doc = doc.replace('\n1.\n\n', '')
doc = doc.replace('\n', '')
entscheid = re.findall(r'le Tribunal administratif fédéral prononce\s*:1.([^.]*)', doc)
entscheid = entscheid[:150]
return entscheid
except:
None
def extracting_entscheid_german(doc):
try:
#search_date = re.search(r'[0-9]+\.', doc)
#search_date = search_date.group()
#doc = doc.replace(search_date, '')
doc = doc.replace('4.', '')
doc = doc.replace('6.', '')
doc = doc.replace('13.', '')
doc = doc.replace('8.', '')
doc = doc.replace('24.', '')
doc = doc.replace('25.', '')
doc = doc.replace('18.', '')
doc = doc.replace('30.', '')
doc = doc.replace('\n1. \n', '')
doc = doc.replace('\n1. \n1.', '')
doc = doc.replace('\n1.\n1.', '')
entscheid = re.findall(r'erkennt das Bundesverwaltungsgericht\s*:\s*1.([^.]*)', doc)
entscheid = entscheid[:150]
return entscheid
except:
None
"""
Explanation: Entscheide
End of explanation
"""
def extracting_gegenstand_italian(doc):
try:
gegenstand = re.findall(r'Oggetto.*([^,]*)', doc)
gegenstand = gegenstand[0].replace('Oggetto', '').replace('\n', '').strip()
gegenstand = gegenstand[:84]
return gegenstand
except:
None
def extracting_gegenstand_french(doc):
try:
gegenstand = re.findall(r'Objet.*([^,]*)', doc)
gegenstand = gegenstand[0].replace('Objet', '').replace('\n', '').strip()
gegenstand = gegenstand[:84]
return gegenstand
except:
None
def extracting_gegenstand_german(doc):
try:
gegenstand = re.findall(r'Gegenstand.*([^,]*)', doc)
gegenstand = gegenstand[0].replace('Gegenstand', '').replace('\n', '').strip()
gegenstand = gegenstand[:84]
return gegenstand
except:
None
"""
Explanation: Gegenstand
Using findall, because the word might occur several times in the document. This way we can just take the first instance, making sure we are extracting the correct term.
End of explanation
"""
def extracting_date_french(doc):
Datum = re.findall(r"Arrêt du [0-9]+[er]* [A-Z]*[éèàâæûa-z]+ 20[0-9]+", doc)
try:
Datum = Datum[0]
except:
None
Datum = str(Datum).replace("['", '').replace("']", '').replace('Arrêt du', '').strip()
Datum = Datum.replace('1er', '1')
return Datum
def extracting_date_german(doc):
Datum = re.findall(r"Urteil vom [0-9]+.[ ]*[ÄÖÜA-Z][äüöa-z]+ 20[0-9]+", doc)
try:
Datum = Datum[0]
except:
None
Datum = str(Datum).replace("['", '').replace("']", '').replace('Urteil vom ', '').strip()
Datum = Datum.replace(".Ap", '. Ap')
return Datum
def extracting_date_italian(doc):
#
Datum = re.findall(r"Sentenza del[l']*[ ]*[0-9]+[ |°][a-z]+ 20[0-9]+", doc)
try:
Datum = Datum[0]
except:
None
Datum = str(Datum).replace("['", '').replace("']", '').replace('Sentenza del ', '').replace('°', ' ').strip()
Datum = Datum.replace("Sentenza dell'", '')
return Datum
"""
Explanation: Date
The most important thing to keep in mind here is to handle the various special characters in French, Italian and German. The same findall approach applies here as well.
End of explanation
"""
df_richter = pd.read_csv('data/richter_partei.csv', delimiter=',')
"""
Explanation: Getting list of judges
The list of judges was pulled off the website with a separate scraper. This gave us the current judges. Judges from earlier years were researched by hand from documentation in the Swiss Parliament.
End of explanation
"""
relevant_clean_judges = list(df_richter['Nachname'])
"""
Explanation: Making the list of judges with their party
End of explanation
"""
def lawyers_countries(x):
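    # Replace the parties/counsel block ('Parteien' / 'Parties' / 'Parti') with a '|||'
    # placeholder so lawyers' names cannot later be mistaken for judges' names.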
avocat_countries = re.search('Parties\n*.*\n*.*', x)
anwalt_countries = re.search('Parteien\n*.*\n*.*', x)
avvocato_countries = re.search('Parti\n*.*\n*.*', x)
try:
if anwalt_countries != None:
anwalt_countries = anwalt_countries.group()
x = x.replace(anwalt_countries, '|||')
return x
elif avocat_countries != None:
avocat_countries = avocat_countries.group()
x = x.replace(avocat_countries, '|||')
return text
elif avvocato_countries != None:
avvocato_countries = avvocato_countries.group()
x = x.replace(avvocato_countries, '|||')
return x
else:
return x
except:
None
"""
Explanation: Dealing with the Lawyers and countries
End of explanation
"""
def gerichtsschreiber(x):
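    # Replace the court clerk line (Gerichtsschreiber/in, cancelliere/a, greffier) with a
    # '|||' placeholder so clerk names are not counted as judges.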
gerichtsschreiber = re.search(r'Gerichtsschreiber.*\.', x)
gerichtsschreiberin = re.search(r'Gerichtsschreiberin.*\n*', x)
cancelliera = re.search(r'cancellier.*,', x)
greffier = re.search(r'Greffier:.*', x)
try:
if gerichtsschreiber != None:
gerichtsschreiber = gerichtsschreiber.group()
x = x.replace(gerichtsschreiber, '|||')
return x
elif gerichtsschreiberin != None:
gerichtsschreiberin = gerichtsschreiberin.group()
x = x.replace(gerichtsschreiberin, '|||')
return x
elif cancelliera != None:
cancelliera = cancelliera.group()
x = x.replace(cancelliera, '|||')
return x
elif greffier != None:
greffier = greffier.group()
x = x.replace(greffier, '|||')
return x
for y in relevant_clean_judges:
y = y + ', greffi'
greffier = re.search(y, x)
if greffier != None:
greffier = greffier.group()
x = x.replace(greffier, '|||')
else:
return x
except:
None
"""
Explanation: Dealing with the Gerichtsschreiber
End of explanation
"""
#searching for the relevant judges
#Lists I already have
#whole_list_of_names i my first list
#relevant_clean_judges is my second list
main_judge_list = []
case_list = []
vorsitz_list = []
for file in whole_list_of_names: #medium_sample_list
#Importing the texts
file_name = file
file = open('txtfiles/' + file, 'r')
text = file.read()
beginning = text[0:310]
end = text[-2000:]
end = end[0:1815]
text = beginning + end
#Prepping text files
text = text.replace('E-4432/2006', 'E-4432/20 fsdfasdfaasdfasdfasdfasdfdasfasfasfasdfasfasfasdfsdfasdf')
text = text.replace(';', ',')
text = text.replace('\n\n', '\n')
text = text.replace(':\n1. Die', ':\n1.\nDie')
text = text.replace(':\n1. Le', ':\n1.\nLe')
text = text.replace(':\n1. Nella', ':\n1.\nNella')
text = text.replace('Demnach erkennt das Bundesverwaltungsgericht: \n1.\n', 'Demnach erkennt das Bundesverwaltungsgericht:\n1.\n')
text = text.replace(':\n1. Il', ':\n1.\nIl')
#dealing with Gerichtsschreiber
text = gerichtsschreiber(text)
#Pulling out lawyer's names, so they don't clash with judges names
text = lawyers_countries(text)
#Makinging small judge name lists
short_judge_list = []
for judge in relevant_clean_judges:
try:
judge = re.search(judge, text)
if judge != None:
judge = judge.group()
short_judge_list.append(judge)
else:
continue
except:
None
#Getting the date
if extracting_date_french(text) == '[]' and extracting_date_italian(text) == '[]':
date = extracting_date_german(text)
elif extracting_date_french(text) == '[]' and extracting_date_german(text) == '[]':
date = extracting_date_italian(text)
else:
date = extracting_date_french(text)
#Getting Gegenstand
if extracting_gegenstand_german(text) == None and extracting_gegenstand_french(text) == None:
gegenstand = extracting_gegenstand_italian(text)
#print(file_name, gegenstand, date)
elif extracting_gegenstand_french(text) == None and extracting_gegenstand_italian(text) == None:
gegenstand = extracting_gegenstand_german(text)
#print(file_name, gegenstand, date)
else:
gegenstand = extracting_gegenstand_french(text)
#print(file_name, gegenstand, date)
#Getting Entscheid
if extracting_entscheid_german(text) == None and extracting_entscheid_french(text) == None:
entscheid = extracting_entscheid_italian(text)
#print(file_name, entscheid, date)
elif extracting_entscheid_french(text) == None and extracting_entscheid_italian(text) == None:
entscheid = extracting_entscheid_german(text)
#print(file_name, entscheid, date)
else:
entscheid = extracting_entscheid_french(text)
#print(file_name, entscheid, date)
#Getting Aktennummer
if extracting_aktennummer_german(text) == None and extracting_aktennummer_french(text) == None:
aktennummer = extracting_aktennummer_italian(text)
#print(file_name, aktennummer, date)
elif extracting_aktennummer_french(text) == None and extracting_aktennummer_italian(text) == None:
aktennummer = extracting_aktennummer_german(text)
#print(file_name, aktennummer, date)
else:
aktennummer = extracting_aktennummer_french(text)
#print(file_name, aktennummer, date)
#Making small judge dictionaries
small_judge_list = []
try:
for judge in short_judge_list:
jugdes_small_dicts = {'judge': judge,
'date': date,
'gegenstand': gegenstand,
'decision': entscheid,
'aktennummer': aktennummer,
'myfile_number': file_name}
small_judge_list.append(jugdes_small_dicts)
except:
None
#Making separate case file
small_case_list = []
try:
case_dict = {'date': date,
'gegenstand': gegenstand,
'decision': entscheid,
'aktennummer': aktennummer,
'myfile_number': file_name}
small_case_list.append(case_dict)
except:
None
case_list = case_list + small_case_list
main_judge_list = main_judge_list + small_judge_list
"""
Explanation: Creating case list and judge list
End of explanation
"""
df_judges = pd.DataFrame(main_judge_list)
df_cases = pd.DataFrame(case_list)
df_judges
jugdes_count = df_judges['judge'].value_counts()
judges_count = pd.DataFrame(jugdes_count)
judges_count.to_csv('jugdes_full_count.csv')
"""
Explanation: Creating a DF out of the main judge list
End of explanation
"""
df_judges = df_judges[df_judges.date != '[]']
df_cases = df_cases[df_cases.date != '[]']
"""
Explanation: We have the data. But we can't start analysing it just yet. We need to harmonise the various data points, starting with the dates.
First the dates
Deleting all the rows with no date. If there was no date, the document wasn't a case file.
End of explanation
"""
def date_harm(date):
#German
date = date.replace('. Januar ', '.1.')
date = date.replace('. Februar ', '.2.')
date = date.replace('. März ', '.3.')
date = date.replace('. April ', '.4.')
date = date.replace(' April ', '.4.')
date = date.replace('. Mai ', '.5.')
date = date.replace('. Juni ', '.6.')
date = date.replace('. Juli ', '.7.')
date = date.replace('. August ', '.8.')
date = date.replace('. September ', '.9.')
date = date.replace('. Oktober ', '.10.')
date = date.replace('. November ', '.11.')
date = date.replace('. Dezember ', '.12.')
#French
date = date.replace(' janvier ', '.1.')
date = date.replace(' février ', '.2.')
date = date.replace(' mars ', '.3.')
date = date.replace(' avril ', '.4.')
date = date.replace(' mai ', '.5.')
date = date.replace(' juin ', '.6.')
date = date.replace(' juillet ', '.7.')
date = date.replace(' août ', '.8.')
date = date.replace(' septembre ', '.9.')
date = date.replace(' octobre ', '.10.')
date = date.replace(' novembre ', '.11.')
date = date.replace(' décembre ', '.12.')
#Italian
date = date.replace(' gennaio ', '.1.')
date = date.replace(' febbraio ', '.2.')
date = date.replace(' marzo ', '.3.')
date = date.replace(' aprile ', '.4.')
date = date.replace(' maggio ', '.5.')
date = date.replace(' giugno ', '.6.')
date = date.replace(' luglio ', '.7.')
date = date.replace(' agosto ', '.8.')
date = date.replace(' settembre ', '.9.')
date = date.replace(' ottobre ', '.10.')
date = date.replace(' novembre ', '.11.')
date = date.replace(' dicembre ', '.12.')
return date
df_judges['date_new'] = df_judges['date'].apply(date_harm)
df_cases['date_new'] = df_cases['date'].apply(date_harm)
"""
Explanation: Creating a function to harmonise all the dates
End of explanation
"""
def parse_date(str_date):
try:
return dateutil.parser.parse(str_date)
except:
None
df_cases['date_new'] = df_cases['date_new'].apply(parse_date)
df_judges['date_new'] = df_judges['date_new'].apply(parse_date)
df_cases.index = df_cases['date_new']
df_judges.index = df_judges['date_new']
"""
Explanation: Making the dates the index so we can resample and plot them later
End of explanation
"""
#Correcting wrongly posted dates.
df_cases['date_new']['2001-09-30'] = '2011-09-30'
df_judges['date_new']['2001-09-30'] = '2011-09-30'
"""
Explanation: Two dates were entered wrongly. Correcting them.
End of explanation
"""
df_cases = df_cases.drop_duplicates(keep='first')
df_judges = df_judges.drop_duplicates(keep='first')
fig, ax = plt.subplots(figsize =(10,5), facecolor='White')
df_cases.resample('M')['aktennummer'].count().plot(ax=ax)
ax.set_title("Urteile Bundesverwaltungsgericht 2007 - September 2016", fontname='DIN Condensed', fontsize=24)
"""
Explanation: Again checking for duplicates
End of explanation
"""
def decision_harm_auto(x):
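    # Map the free-text verdict to 'Gutgeheissen' (approved) or 'Abgewiesen' (dismissed) by
    # searching for characteristic German, French and Italian keywords; texts that match
    # neither group are returned unchanged.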
try:
gutgeheissen = re.search(r'utgeheissen', x)
gutgeheissen2 = re.search(r'utheissen', x)
gutgeheissen3 = re.search(r'gutzuheissen', x)
admis = re.search(r'admis', x)
accolto = re.search(r'ccolto', x)
accolta = re.search(r'ccolta', x)
joint = re.search(r'Les causes D-3901/2008, D-3902/2008, D-3903/2008, D-3904/2008 et D-3905/2008 sont jointes', x)
annulée = re.search(r'annulée', x)
aufgehoben = re.search('aufgehoben', x)
nicht_eingetreten = re.search('nicht eingetreten', x)
abgeschrieben = re.search('abgeschrieben', x)
gegenstandslos_geworden = re.search('gegenstandslos geworden', x)
abgewiesen = re.search(r'bgewiesen', x)
abgewiesen2 = re.search(r'abge-wiesen', x)
abgewiesen3 = re.search(r'abgwiesen', x)
rejeté = re.search(r'ejet', x)
respinto = re.search(r'espint', x)
irrecevable = re.search(r'irrecevable', x)
#angenommen
if gutgeheissen != None:
x = 'Gutgeheissen'
return x
elif gutgeheissen2 != None:
x = 'Gutgeheissen'
return x
elif gutgeheissen3 != None:
x = 'Gutgeheissen'
return x
elif admis != None:
x = 'Gutgeheissen'
return x
elif accolto != None:
x = 'Gutgeheissen'
return x
elif accolta != None:
x = 'Gutgeheissen'
return x
elif aufgehoben != None:
x = 'Gutgeheissen'
return x
elif joint != None:
x = 'Gutgeheissen'
return x
elif annulée != None:
x = 'Gutgeheissen'
return x
#abgewiesen
elif abgewiesen != None:
x = 'Abgewiesen'
return x
elif rejeté != None:
x = 'Abgewiesen'
return x
elif respinto != None:
x = 'Abgewiesen'
return x
elif irrecevable != None:
x = 'Abgewiesen'
return x
elif nicht_eingetreten != None:
x = 'Abgewiesen'
return x
elif abgewiesen2 != None:
x = 'Abgewiesen'
return x
elif abgewiesen3 != None:
x = 'Abgewiesen'
return x
elif abgeschrieben != None:
x = 'Abgewiesen'
return x
elif gegenstandslos_geworden != None:
x = 'Abgewiesen'
return x
else:
return x
except:
None
df_cases['decision_harm_auto'] = df_cases['decision'].apply(decision_harm_auto)
df_judges['decision_harm_auto'] = df_judges['decision'].apply(decision_harm_auto)
"""
Explanation: Decisions
Harmonising all the decisions.
End of explanation
"""
df_cases_non_harm_count = df_cases[df_cases['decision_harm_auto'] != 'Abgewiesen']
df_cases_non_harm_count = df_cases_non_harm_count[df_cases_non_harm_count['decision_harm_auto'] != 'Gutgeheissen']
Weitergezogen_oder_vereinigt = df_cases_non_harm_count['aktennummer'].count()
Prozent_weitergezogen_etc = round((Weitergezogen_oder_vereinigt / df_cases['aktennummer'].count()) * 100, 1)
Prozent_weitergezogen_etc
"""
Explanation: Percentage of cases that weren't considered.
End of explanation
"""
#Creating new dfs with decision counts
df_gutgeheissen = pd.DataFrame(df_judges[df_judges['decision_harm_auto'] == 'Gutgeheissen']['judge'].value_counts())
df_gutgeheissen = df_gutgeheissen.reset_index()
df_abgewiesen = pd.DataFrame(df_judges[df_judges['decision_harm_auto'] == 'Abgewiesen']['judge'].value_counts())
df_abgewiesen = df_abgewiesen.reset_index()
df_judge_quota = df_gutgeheissen.merge(df_abgewiesen, left_on='index', right_on='index')
df_judge_quota.columns = [['judge', 'gutgeheissen', 'abgewiesen']]
#del df_judge_quota['index']
df_judge_quota['quota'] = round(df_judge_quota['gutgeheissen'] / (df_judge_quota['gutgeheissen'] + df_judge_quota['abgewiesen']) * 100, 1)
"""
Explanation: Comparing Judge Decisions
End of explanation
"""
df_judge_partei = pd.read_csv('data/richter_partei.csv', delimiter=',')
df_judge_quota = df_judge_quota.merge(df_judge_partei, left_on='judge', right_on='Nachname')
df_judge_quota[['judge', 'Partei', 'gutgeheissen', 'abgewiesen', 'quota']].sort_values(by='quota').head(5)
"""
Explanation: Toughest judges
Bringing in the parties of the judges. This was scraped from the BVGer website and gathered from documentation from the Swiss Parliament.
End of explanation
"""
df_judge_quota[['judge', 'Partei', 'gutgeheissen', 'abgewiesen', 'quota']].sort_values(by='quota', ascending=False).head(5)
"""
Explanation: Softest judges
End of explanation
"""
df_partei_vergleich = df_judges.merge(df_judge_partei, left_on='judge', right_on='Nachname')
"""
Explanation: Merging with the judges' party data (Richter Partei)
End of explanation
"""
def strip_partei(x):
x = x.strip()
return x
df_partei_vergleich['Partei'] = df_partei_vergleich['Partei'].apply(strip_partei)
df_partei_vergleich['Partei'].value_counts()
"""
Explanation: Party comparison (Parteien-Vergleich)
Making sure all cells are stripped
End of explanation
"""
df_P_gutgeheissen = pd.DataFrame(df_partei_vergleich[df_partei_vergleich['decision_harm_auto'] == 'Gutgeheissen']['Partei'].value_counts())
df_P_gutgeheissen = df_P_gutgeheissen.reset_index()
df_P_abgewiesen = pd.DataFrame(df_partei_vergleich[df_partei_vergleich['decision_harm_auto'] == 'Abgewiesen']['Partei'].value_counts())
df_P_abgewiesen = df_P_abgewiesen.reset_index()
df_P_quota = df_P_gutgeheissen.merge(df_P_abgewiesen, left_on='index', right_on='index')
df_P_quota.columns = [['judge', 'gutgeheissen', 'abgewiesen']]
df_P_quota['quota in %'] = round(df_P_quota['gutgeheissen'] / (df_P_quota['gutgeheissen'] + df_P_quota['abgewiesen']) * 100, 1)
df_P_quota.sort_values(by='quota in %', ascending=True)
"""
Explanation: Creating new dfs with decision counts
End of explanation
"""
fig, ax = plt.subplots(figsize =(10,5), facecolor='White')
df_cases[df_cases['decision_harm_auto'] == 'Abgewiesen'].resample('M')['aktennummer'].count().plot(ax=ax)
ax.set_title("Urteile Bundesverwaltungsgericht 2007 - , abgewiesene Klagen", fontname='DIN Condensed', fontsize=24)
df_cases[df_cases['decision_harm_auto'] == 'Gutgeheissen'].resample('M')['aktennummer'].count().plot(ax=ax)
ax.set_title("Gutgeheissene vs Abgewiesene Urteile 2007 - 2016", fontname='DIN Condensed', fontsize=24)
plt.savefig('Gutgeheissene vs Abgewiesene Urteile 2007 - 2016.png', transparent=True, bbox_inches='tight')
plt.savefig('Gutgeheissene vs Abgewiesene Urteile 2007 - 2016.pdf', transparent=True, bbox_inches='tight')
"""
Explanation: Visualising the data
End of explanation
"""
df_w = df_partei_vergleich[df_partei_vergleich['Geschlecht'] == 'w']
w_gutgeheissen = df_w[df_w['decision_harm_auto'] == 'Gutgeheissen']['aktennummer'].count()
w_abgewiesen = df_w[df_w['decision_harm_auto'] == 'Abgewiesen']['aktennummer'].count()
Prozent_w_gutgeheissen = w_gutgeheissen / (w_gutgeheissen + w_abgewiesen) * 100
Prozent_w_gutgeheissen
df_m = df_partei_vergleich[df_partei_vergleich['Geschlecht'] == 'm']
m_gutgeheissen = df_m[df_m['decision_harm_auto'] == 'Gutgeheissen']['aktennummer'].count()
m_abgewiesen = df_m[df_m['decision_harm_auto'] == 'Abgewiesen']['aktennummer'].count()
Prozent_m_gutgeheissen = m_gutgeheissen / (m_gutgeheissen + m_abgewiesen) * 100
Prozent_m_gutgeheissen
"""
Explanation: Gender comparison (Geschlechtervergleich)
End of explanation
"""
#resample documentations:
#http://stackoverflow.com/questions/17001389/pandas-resample-documentation
df_Wenger = df_judges[df_judges['judge'] == 'Wenger']
df_Wenger[df_Wenger['decision_harm_auto'] == 'Abgewiesen'].resample('Q')['aktennummer'].count().plot()
df_Wenger[df_Wenger['decision_harm_auto'] == 'Gutgeheissen'].resample('Q')['aktennummer'].count().plot()
plt.savefig('wenger.png', transparent=True, bbox_inches='tight')
plt.savefig('wenger.pdf', transparent=True, bbox_inches='tight')
df_haefeli = df_judges[df_judges['judge'] == 'Haefeli']
df_haefeli[df_haefeli['decision_harm_auto'] == 'Abgewiesen'].resample('Q')['aktennummer'].count().plot()
df_haefeli[df_haefeli['decision_harm_auto'] == 'Gutgeheissen'].resample('Q')['aktennummer'].count().plot()
plt.savefig('haefeli.png', transparent=True, bbox_inches='tight')
plt.savefig('haefeli.pdf', transparent=True, bbox_inches='tight')
df_theis = df_judges[df_judges['judge'] == 'Theis']
df_theis[df_theis['decision_harm_auto'] == 'Abgewiesen'].resample('Q')['aktennummer'].count().plot()
df_theis[df_theis['decision_harm_auto'] == 'Gutgeheissen'].resample('Q')['aktennummer'].count().plot()
plt.savefig('theis.png', transparent=True, bbox_inches='tight')
plt.savefig('theis.pdf', transparent=True, bbox_inches='tight')
"""
Explanation: Outcomes over time for individual judges (Verlauf der einzelnen Richter)
End of explanation
"""
|