import scipy.io.wavfile as sio
import scipy.signal as sis
from scipy import interpolate
import numpy as np
import math
import matplotlib.pyplot as plt
import mylib as myl
import sys
import copy as cp
import re
import scipy.fftpack as sf
# NOTE: int2float might be removed after scipy update/check
# (check defaults in myl.sig_preproc)
# read wav file
# IN:
# fileName
# OUT:
# signal ndarray
# sampleRate
def wavread(f,opt={'do_preproc':True}):
## signal input
fs, s_in = sio.read(f)
# int -> float
s = myl.wav_int2float(s_in)
# preproc
if opt['do_preproc']:
s = myl.sig_preproc(s)
return s, fs
# DCT
# IN:
# y - 1D signal vector
# opt
# ['fs'] - sample rate
# ['wintyp'] - <'kaiser'>, any type supported by
# scipy.signal.get_window()
# ['winparam'] - <1> additionally needed window parameters,
# scalar, string, list ..., depends on 'wintyp'
# ['nsm'] - <3> number of spectral moments
# ['rmo'] - skip first (lowest) cosine (=constant offset)
# in spectral moment calculation <1>|0
# ['lb'] - lower cutoff frequency for coef truncation <0>
# ['ub'] - upper cutoff frequency (if 0, no cutoff) <0>
# Recommended e.g. for f0 DCT, so that only influence
# of events with <= 10Hz on f0 contour is considered)
# ['peak_prct'] - <80> lower percentile threshold to be exceeded for
# amplitude maxima in DCT spectrum
# OUT:
# dct
# ['c_orig'] all coefs
# ['f_orig'] their frequencies
# ['c'] coefs with freq between lb and ub
# ['f'] their freqs
# ['i'] their indices in c_orig
# ['sm'] spectral moments based on c
# ['opt'] input options
# ['m'] y mean
# ['sd'] y standard dev
# ['cbin'] array of sum(abs(coef)) in frequency bins
# ['fbin'] corresponding lower boundary freqs
# ['f_max'] frequency of global amplitude maximum
# ['f_lmax'] frequencies of local maxima (array of minlen 1)
# ['c_cog'] the coef amplitude of the cog freq (sm[0])
# PROBLEMS:
# - if segment is too short (< 5 samples) lowest freqs associated to
# DCT components are too high for ub, that is dct_trunc() returns
# empty array.
# -> np.nan assigned to respective variables
def dct_wrapper(y,opt):
dflt={'wintyp':'kaiser','winparam':1,'nsm':3,'rmo':True,
'lb':0,'ub':0,'peak_prct':80}
opt = myl.opt_default(opt,dflt)
# weight window
w = sig_window(opt['wintyp'],len(y),opt['winparam'])
y = y*w
#print(1,len(y))
# centralize
y = y-np.mean(y)
#print(2,len(y))
# DCT coefs
c = sf.dct(y,norm='ortho')
#print(3,len(c))
# indices (starting with 0)
ly = len(y)
ci = myl.idx_a(ly)
    # corresponding cos frequencies (DCT-II coefficient k maps to k*fs/(2*ly) Hz)
    f = ci * (opt['fs']/(ly*2))
# band pass truncation of coefs
# indices of coefs with lb <= freq <= ub
i = dct_trunc(f,ci,opt)
#print('f ci i',f,ci,i)
# analysis segment too short -> DCT freqs above ub
if len(i)==0:
sm = myl.ea()
while len(sm) <= opt['nsm']:
sm = np.append(sm,np.nan)
return {'c_orig':c,'f_orig':f,'c':myl.ea(),'f':myl.ea(),'i':[],'sm':sm,'opt':opt,
'm':np.nan,'sd':np.nan,'cbin':myl.ea(),'fbin':myl.ea(),
'f_max':np.nan, 'f_lmax':myl.ea(), 'c_cog': np.nan}
# mean abs error from band-limited IDCT
#mae = dct_mae(c,i,y)
# remove constant offset with index 0
# already removed by dct_trunc in case lb>0. Thus checked for i[0]==0
# (i[0] indeed represents constant offset; tested by
# cr = np.zeros(ly); cr[0]=c[0]; yr = sf.idct(cr); print(yr)
if opt['rmo']==True and len(i)>1 and i[0]==0:
j = i[1:len(i)]
else:
j = i
if type(j) is not list: j = [j]
# coefs and their frequencies between lb and ub
# (+ constant offset removed)
fi = f[j]
ci = c[j]
# spectral moments
if len(j)>0:
sm = specmom(ci,fi,opt['nsm'])
else:
sm = np.zeros(opt['nsm'])
# frequency bins
fbin, cbin = dct_fbin(fi,ci,opt)
# frequencies of global and local maxima in DCT spectrum
f_max, f_lmax, px = dct_peak(ci,fi,sm[0],opt)
# return
return {'c_orig':c,'f_orig':f,'c':ci,'f':fi,'i':j,'sm':sm,'opt':opt,
'm':np.mean(y),'sd':np.std(y),'cbin':cbin,'fbin':fbin,
'f_max':f_max, 'f_lmax':f_lmax, 'c_cog': px}
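# Hedged sanity sketch (hypothetical helper, not part of the original module):
# for a DCT-II of length n at sample rate fs, coefficient k corresponds to
# frequency k*fs/(2n). Feeding the k-th DCT-II basis cosine into sf.dct()
# should therefore concentrate the energy in coefficient k.
def _dct_freq_demo(fs=100, n=200, k=10):
    f_k = k * fs / (2 * n)              # frequency mapped to coefficient k
    t = (np.arange(n) + 0.5) / fs       # DCT-II sample instants
    c = sf.dct(np.cos(2 * np.pi * f_k * t), norm='ortho')
    assert np.argmax(np.abs(c)) == k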
# returns local and max peak frequencies
# IN:
# x: array of abs coef amplitudes
# f: corresponding frequencies
# cog: center of gravity
# OUT:
# f_gm: freq of global maximum
# f_lm: array of freq of local maxima
# px: threshold to be exceeded (derived from prct specs)
def dct_peak(x,f,cog,opt):
x = abs(cp.deepcopy(x))
## global maximum
i = myl.find(x,'is','max')
if len(i)>1:
i=int(np.mean(i))
f_gm = float(f[i])
## local maxima
    # threshold to be exceeded
px = dct_px(x,f,cog,opt)
idx = myl.find(x,'>=',px)
# 2d array of neighboring+1 indices
# e.g. [[0,1,2],[5,6],[9,10]]
ii = []
# min freq distance between maxima
fd_min = 1
for i in myl.idx(idx):
if len(ii)==0:
ii.append([idx[i]])
elif idx[i]>ii[-1][-1]+1:
xi = x[ii[-1]]
fi = f[ii[-1]]
j = myl.find(xi,'is','max')
#print('xi',xi,'fi',fi,'f',f[idx[i]])
if len(j)>0 and f[idx[i]]>fi[j[0]]+fd_min:
#print('->1')
ii.append([idx[i]])
else:
#print('->2')
ii[-1].append(idx[i])
#myl.stopgo() #!c
else:
ii[-1].append(idx[i])
# get index of x maximum within each subsegment
# and return corresponding frequencies
f_lm = []
for si in ii:
zi = myl.find(x[si],'is','max')
if len(zi)>1:
zi=int(np.mean(zi))
else:
zi = zi[0]
i = si[zi]
if not np.isnan(i):
f_lm.append(f[i])
#print('px',px)
#print('i',ii)
#print('x',x)
#print('f',f)
#print('m',f_gm,f_lm)
#myl.stopgo()
return f_gm, f_lm, px
# return center-of-gravity related amplitude
# IN:
# x: array of coefs
# f: corresponding freqs
# cog: center of gravity freq
# opt
# OUT:
# coef amplitude related to cog
def dct_px(x,f,cog,opt):
x = abs(cp.deepcopy(x))
# cog outside freq range
if cog <= f[0]:
return x[0]
elif cog >= f[-1]:
return x[-1]
# find f-indices adjacent to cog
for i in range(len(f)-1):
if f[i] == cog:
return x[i]
elif f[i+1] == cog:
return x[i+1]
elif f[i] < cog and f[i+1] > cog:
# interpolate
#xi = np.interp(cog,f[i:i+2],x[i:i+2])
#print('cog:',cog,'xi',f[i:i+2],x[i:i+2],'->',xi)
return np.interp(cog,f[i:i+2],x[i:i+2])
return np.percentile(x,opt['peak_prct'])
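# Usage sketch (illustrative values): with f=[1.,2.], x=[10.,20.] and cog=1.5,
# dct_px interpolates linearly and returns 15.0; cog=0.5 (below f[0]) returns
# x[0]=10.0.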
# pre-emphasis
# alpha > 1 (interpreted as lower cutoff freq)
# alpha <- exp(-2 pi alpha delta)
# s'[n] = s[n]-alpha*s[n-1]
# IN:
# signal
# alpha - s[n-1] weight <0.95>
# fs - sample rate <-1>
# do_scale - <FALSE> if TRUE then the pre-emphasized signal is scaled to
#            the same abs_mean value as the original signal (in general
#            pre-emphasis leads to overall energy loss)
def pre_emphasis(y,a=0.95,fs=-1,do_scale=False):
# determining alpha directly or from cutoff freq
if a>1:
if fs <= 0:
            print('pre emphasis: alpha cannot be calculated without deltaT (fs<=0). Set to 0.95')
a = 0.95
else:
a = math.exp(-2*math.pi*a*1/fs)
#print('alpha',a)
# shifted signal
ype = np.append(y[0], y[1:] - a * y[:-1])
# scaling
if do_scale:
        # use a name that does not shadow the scipy.fftpack alias sf
        scl = np.mean(abs(y))/np.mean(abs(ype))
        ype *= scl
## plot
#ys = y[30000:40000]
#ypes = ype[30000:40000]
#t = np.linspace(0,len(ys),len(ys))
#fig, spl = plt.subplots(2,1,squeeze=False)
#cid1 = fig.canvas.mpl_connect('button_press_event', onclick_next)
#cid2 = fig.canvas.mpl_connect('key_press_event', onclick_exit)
#spl[0,0].plot(t,ys)
#spl[1,0].plot(t,ypes)
#plt.show()
##
return ype
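# Usage sketch: pre-emphasis acts as a first-order high-pass filter. With the
# default a=0.95 a constant signal is damped to 5% after the first sample:
# pre_emphasis(np.ones(5)) -> [1., 0.05, 0.05, 0.05, 0.05]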
# frequency bins: symmetric 2-Hz windows around freq integers
# in bandpass overlapped by 1 Hz
# IN:
# f - ndarray frequencies
# c - ndarray coefs
# opt['lb'] - lower and upper truncation freqs
# ['ub']
# OUT:
# fbin - ndarray, lower bnd of freq bins
# cbin - ndarray, summed abs coef values in these bins
def dct_fbin(f,c,opt):
fb = myl.idx_seg(math.floor(opt['lb']),math.ceil(opt['ub']))
cbin = np.zeros(len(fb)-1);
for j in myl.idx_a(len(fb)-1):
k = myl.intersect(myl.find(f,'>=',fb[j]),
myl.find(f,'<=',fb[j+1]))
cbin[j] = sum(abs(c[k]))
fbin = fb[myl.idx_a(len(fb)-1)]
return fbin, cbin
# spectral moments
# IN:
# c - ndarray, coefficients
# f - ndarray, related frequencies <1:len(c)>
# n - number of spectral moments <3>
# OUT:
# m - ndarray moments (increasing)
def specmom(c,f=[],n=3):
if len(f)==0:
f = myl.idx_a(len(c))+1
c = abs(c)
s = sum(c)
k=0;
m = np.asarray([])
for i in myl.idx_seg(1,n):
m = myl.push(m, sum(c*((f-k)**i))/s)
k = m[-1]
return m
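# Hedged standalone check of the moment recursion above (assumes myl.idx_seg(1,n)
# yields 1..n and myl.push appends): with all spectral mass at 5 Hz the center
# of gravity (moment 1) is 5 and the spread around it (moment 2) is 0. Note
# that each moment is centered on the previous moment's value, so moment 3 is
# taken around 0 here and evaluates to 125.
def _specmom_demo():
    c = np.asarray([0.0, 1.0, 0.0])
    f = np.asarray([4.0, 5.0, 6.0])
    s, k, m = np.sum(c), 0.0, []
    for i in range(1, 4):
        m.append(np.sum(c * (f - k) ** i) / s)
        k = m[-1]
    return m  # [5.0, 0.0, 125.0]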
# wrapper around IDCT
# IN:
# c - coef vector derived by dct
# i - indices of coefs to be taken for IDCT; if empty (default),
# all coefs taken)
# OUT:
# y - IDCT result
def idct_bp(c,i=myl.ea()):
if len(i)==0:
return sf.idct(c,norm='ortho')
cr = np.zeros(len(c))
cr[i]=c[i]
    # normalization must match the forward transform in dct_wrapper
    return sf.idct(cr,norm='ortho')
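# Hedged usage sketch (hypothetical helper): keeping only the strongest
# coefficients reconstructs most of the signal (norm='ortho' makes dct/idct
# exact inverses of each other).
def _idct_bp_demo():
    y = np.sin(2 * np.pi * np.arange(32) / 32)
    c = sf.dct(y, norm='ortho')
    i = np.argsort(np.abs(c))[-4:]  # indices of the 4 largest coefficients
    yr = idct_bp(c, i)
    return np.max(np.abs(y - yr))   # reconstruction error from 4 coefficients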
# mean abs error from IDCT
def dct_mae(c,i,y):
cr = np.zeros(len(c))
cr[i]=c[i]
    yr = sf.idct(cr,norm='ortho')
return myl.mae(yr,y)
# indices to truncate DCT output to freq band
# IN:
# f - ndarray, all frequencies
# ci - all indices of coef ndarray
# opt['lb'] - lower cutoff freq
# ['ub'] - upper cutoff freq
# OUT:
# i - ndarray, indices in F of elements to be kept
def dct_trunc(f,ci,opt):
if opt['lb']>0:
ihp = myl.find(f,'>=',opt['lb'])
else:
ihp = ci
if opt['ub']>0:
ilp = myl.find(f,'<=',opt['ub'])
else:
ilp = ci
return myl.intersect(ihp,ilp)
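# Usage sketch: with f=[0.,10.,20.,30.], ci=[0,1,2,3] and opt={'lb':5,'ub':25},
# dct_trunc keeps the indices whose frequencies lie within the band: [1, 2].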
# wrapper around wavread and energy calculation
# IN:
# f: wavFileName (any number of channels) or array containing
# the signal (any number of channels=columns)
# opt: energy extraction and postprocessing
# .win, .wintyp, .winparam: window parameters
# .sts: stepsize for energy contour
# .do_preproc: centralizing signal
# .do_out: remove outliers
# .do_interp: linear interpolation over silence
# .do_smooth: smoothing (median or savitzky golay)
# .out dict; see pp_outl()
# .smooth dict; see pp_smooth()
# fs: <-1> needed if f is array
# OUT:
# y: time + energy contour 2-dim np.array
# (1st column: time, other columns: energy)
def wrapper_energy(f,opt = {}, fs = -1):
opt = myl.opt_default(opt,{'wintyp':'hamming',
'winparam':'',
'sts':0.01,
'win':0.05,
'do_preproc': True,
'do_out': False,
'do_interp': False,
'do_smooth': False,
'out': {},
'smooth': {}})
opt['out'] = myl.opt_default(opt['out'], {'f': 3,
'm': 'mean'})
opt['smooth'] = myl.opt_default(opt['smooth'],{"mtd": "sgolay",
"win": 7,
"ord": 3})
if type(f) is str:
s, fs = wavread(f,opt)
else:
if fs < 0:
sys.exit("array input requires sample rate fs. Exit.")
s = f
opt['fs']=fs
# convert to 2-dim array; each column represents a channel
if np.ndim(s)==1:
s = np.expand_dims(s, axis=1)
# output (.T-ed later, reserve first list for time)
y = myl.ea()
# over channels
for i in np.arange(0,s.shape[1]):
e = sig_energy(s[:,i],opt)
# setting outlier to 0
if opt['do_out']:
e = pp_outl(e,opt['out'])
# interpolation over 0
if opt['do_interp']:
e = pp_interp(e)
# smoothing
if opt['do_smooth']:
e = pp_smooth(e,opt['smooth'])
# <0 -> 0
e[myl.find(e,'<',0)]=0
y = myl.push(y,e)
# output
if np.ndim(y)==1:
y = np.expand_dims(y, axis=1)
else:
y = y.T
# concat time as 1st column
sts = opt['sts']
t = np.arange(0,sts*y.shape[0],sts)
if len(t) != y.shape[0]:
while len(t) > y.shape[0]:
t = t[0:len(t)-1]
while len(t) < y.shape[0]:
t = np.append(t,t[-1]+sts)
t = np.expand_dims(t, axis=1)
y = np.concatenate((t,y),axis=1)
return y
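# Usage sketch (hypothetical call): for a mono ndarray s sampled at 16 kHz,
#   e = wrapper_energy(s, {'sts': 0.01, 'win': 0.05}, fs=16000)
# returns a 2-dim array whose first column is time in seconds and whose
# remaining columns hold one energy contour per channel.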
### replacing outliers by 0 ###################
def pp_outl(y,opt):
if "m" not in opt:
return y
# ignore zeros
opt['zi'] = True
io = myl.outl_idx(y,opt)
if np.size(io)>0:
y[io] = 0
return y
### interpolation over 0 (+constant extrapolation) #############
def pp_interp(y,opt={}):
xi = myl.find(y,'==',0)
xp = myl.find(y,'>',0)
yp = y[xp]
if "kind" in opt:
f = interpolate.interp1d(xp,yp,kind=opt["kind"],
fill_value=(yp[0],yp[-1]))
yi = f(xi)
else:
yi = np.interp(xi,xp,yp)
y[xi]=yi
return y
#!check
### smoothing ########################################
# remark: savgol_filter() causes warning
# Using a non-tuple sequence for multidimensional indexing is deprecated
# the warning should disappear as of scipy.signal 1.2.0
# (https://github.com/scipy/scipy/issues/9086)
def pp_smooth(y,opt):
if opt['mtd']=='sgolay':
if len(y) <= opt['win']:
return y
y = sis.savgol_filter(y,opt['win'],opt['ord'])
elif opt['mtd']=='med':
y = sis.medfilt(y,opt['win'])
return y
# calculates energy contour from acoustic signal
# do_preproc per default False. If not yet preprocessed by myl.sig_preproc()
# set to True
# IN:
# x ndarray signal
# opt['fs'] - sample frequency
# ['wintyp'] - <'hamming'>, any type supported by
# scipy.signal.get_window()
# ['winparam'] - <''> additionally needed window parameters,
# scalar, string, list ...
# ['sts'] - stepsize of moving window
# ['win'] - window length
# OUT:
# y ndarray energy contour
def sig_energy(x,opt):
dflt={'wintyp':'hamming','winparam':'','sts':0.01,'win':0.05}
opt = myl.opt_default(opt,dflt)
# stepsize and winlength in samples
sts = round(opt['sts']*opt['fs'])
win = min([math.floor(len(x)/2),round(opt['win']*opt['fs'])])
# weighting window
w = sig_window(opt['wintyp'],win,opt['winparam'])
# energy values
y = np.asarray([])
for j in myl.idx_a(len(x)-win,sts):
s = x[j:j+len(w)]*w
y = myl.push(y,myl.rmsd(s))
return y
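# Hedged standalone sketch of the windowed-RMS idea above (hypothetical helper;
# the module itself uses myl.rmsd/myl.push): the energy of frame j is the RMS
# of the windowed samples x[j:j+win], hopping by sts samples.
def _frame_rms(x, win, sts):
    w = sis.get_window('hamming', win)
    return np.asarray([np.sqrt(np.mean((x[j:j + win] * w) ** 2))
                       for j in range(0, len(x) - win, sts)])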
# wrapper around windows
# IN:
# typ: any type supported by scipy.signal.get_window()
# l: <1> length
# par: <''> additional parameters as string, scalar, list etc
# OUT:
# window array
def sig_window(typ,l=1,par=''):
if typ=='none' or typ=='const':
return np.ones(l)
if ((type(par) is str) and (len(par) == 0)):
return sis.get_window(typ,l)
return sis.get_window((typ,par),l)
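# Usage sketch: sig_window('hamming', 256) -> sis.get_window('hamming', 256),
# while sig_window('kaiser', 256, 1) -> sis.get_window(('kaiser', 1), 256).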
# pause detection
# IN:
# s - mono signal
# opt['fs'] - sample frequency
# ['ons'] - idx onset <0> (to be added to time output)
# ['flt']['f'] - filter options, boundary frequencies in Hz
#             (2 values for btype 'band', else 1): <8000> (possibly lowered by fu_filt())
#      ['btype'] - 'band'|'high'|<'low'>
# ['ord'] - butterworth order <5>
# ['fs'] - (internally copied)
# ['l'] - analysis window length (in sec)
# ['l_ref'] - reference window length (in sec)
# ['e_rel'] - min energy quotient analysisWindow/referenceWindow
# ['fbnd'] - True|<False> assume pause at beginning and end of file
# ['n'] - <-1> extract exactly n pauses (if > -1)
# ['min_pau_l'] - min pause length <0.4> sec
# ['min_chunk_l'] - min inter-pausal chunk length <0.2> sec
# ['force_chunk'] - <False>, if True, pause-only is replaced by chunk-only
# ['margin'] - <0> time to reduce pause on both sides (sec; if chunks need init and final silence)
# OUT:
# pau['tp'] 2-dim array of pause [on off] (in sec)
# ['tpi'] 2-dim array of pause [on off] (indices in s = sampleIdx-1 !!)
# ['tc'] 2-dim array of speech chunks [on off] (i.e. non-pause, in sec)
# ['tci'] 2-dim array of speech chunks [on off] (indices)
# ['e_ratio'] - energy ratios corresponding to pauses in ['tp'] (analysisWindow/referenceWindow)
def pau_detector(s,opt={}):
if 'fs' not in opt:
sys.exit('pau_detector: opt does not contain key fs.')
dflt = {'e_rel':0.0767,'l':0.1524,'l_ref':5,'n':-1,'fbnd':False,'ons':0,'force_chunk':False,
'min_pau_l':0.4,'min_chunk_l':0.2,'margin':0,
'flt':{'btype':'low','f':np.asarray([8000]),'ord':5}}
opt = myl.opt_default(opt,dflt)
opt['flt']['fs'] = opt['fs']
## removing DC, low-pass filtering
flt = fu_filt(s,opt['flt'])
y = flt['y']
## pause detection for >=n pauses
t, e_ratio = pau_detector_sub(y,opt)
if len(t)>0:
## extending 1st and last pause to file boundaries
if opt['fbnd']==True:
t[0,0]=0
t[-1,-1]=len(y)-1
## merging pauses across too short chunks
## merging chunks across too small pauses
if (opt['min_pau_l']>0 or opt['min_chunk_l']>0):
t, e_ratio = pau_detector_merge(t,e_ratio,opt)
## too many pauses?
# -> subsequently remove the ones with highest e-ratio
if (opt['n']>0 and len(t)>opt['n']):
t, e_ratio = pau_detector_red(t,e_ratio,opt)
## speech chunks
tc = pau2chunk(t,len(y))
## pause-only -> chunk-only
if (opt['force_chunk']==True and len(tc)==0):
tc = cp.deepcopy(t)
t = np.asarray([])
e_ratio = np.asarray([])
## add onset
t = t+opt['ons']
tc = tc+opt['ons']
## return dict
## incl fields with indices to seconds (index+1=sampleIndex)
pau={'tpi':t, 'tci':tc, 'e_ratio': e_ratio}
pau['tp'] = myl.idx2sec(t,opt['fs'])
pau['tc'] = myl.idx2sec(tc,opt['fs'])
#print(pau)
return pau
# merging pauses across too short chunks
# merging chunks across too small pauses
# IN:
# t [[on off]...] of pauses
# e [e_rat ...]
# OUT:
# t [[on off]...] merged
# e [e_rat ...] merged (simply mean of merged segments taken)
def pau_detector_merge(t,e,opt):
## min pause and chunk length in samples
mpl = myl.sec2smp(opt['min_pau_l'],opt['fs'])
mcl = myl.sec2smp(opt['min_chunk_l'],opt['fs'])
## merging chunks across short pauses
tm = np.asarray([])
em = np.asarray([])
for i in myl.idx_a(len(t)):
if ((t[i,1]-t[i,0] >= mpl) or
(opt['fbnd']==True and (i==0 or i==len(t)-1))):
tm = myl.push(tm,t[i,:])
em = myl.push(em,e[i])
# nothing done in previous step?
if len(tm)==0:
tm = cp.deepcopy(t)
em = cp.deepcopy(e)
if len(tm)==0:
return t, e
## merging pauses across short chunks
tn = np.asarray([tm[0,:]])
en = np.asarray([em[0]])
if (tn[0,0]<mcl): tn[0,0]=0
for i in np.arange(1,len(tm),1):
if (tm[i,0] - tn[-1,1] < mcl):
tn[-1,1] = tm[i,1]
en[-1] = np.mean([en[-1],em[i]])
else:
tn = myl.push(tn,tm[i,:])
en = myl.push(en,em[i])
#print("t:\n", t, "\ntm:\n", tm, "\ntn:\n", tn) #!v
return tn, en
# pause to chunk intervals
# IN:
# t [[on off]] of pause segments (indices in signal)
# l length of signal vector
# OUT:
# tc [[on off]] of speech chunks
def pau2chunk(t,l):
if len(t)==0:
return np.asarray([[0,l-1]])
if t[0,0]>0:
tc = np.asarray([[0,t[0,0]-1]])
else:
tc = np.asarray([])
for i in np.arange(0,len(t)-1,1):
if t[i,1] < t[i+1,0]-1:
tc = myl.push(tc,[t[i,1]+1,t[i+1,0]-1])
if t[-1,1]<l-1:
tc = myl.push(tc,[t[-1,1]+1,l-1])
return tc
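# Worked example: with pause segments t=[[2,4],[8,9]] in a signal of length
# l=12, pau2chunk returns the complementary speech chunks [[0,1],[5,7],[10,11]].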
# called by pau_detector
# IN:
# as for pau_detector
# OUT:
# t [on off]
# e_ratio
def pau_detector_sub(y,opt):
## settings
# reference window span
rl = math.floor(opt['l_ref']*opt['fs'])
# signal length
ls = len(y)
    # analysis window length in samples
    ml = opt['l']*opt['fs']
# global rmse and pause threshold
e_rel = cp.deepcopy(opt['e_rel'])
# global rmse
# as fallback in case reference window is likely to be pause
# almost-zeros excluded (cf percentile) since otherwise pauses
# show a too high influence, i.e. lower the reference too much
# so that too few pauses detected
#e_glob = myl.rmsd(y)
ya = abs(y)
qq = np.percentile(ya,[50])
e_glob = myl.rmsd(ya[ya>qq[0]])
t_glob = opt['e_rel']*e_glob
# stepsize
sts=max([1,math.floor(0.05*opt['fs'])])
# energy calculation in analysis and reference windows
wopt_en = {'win':ml,'rng':[0,ls]}
wopt_ref = {'win':rl,'rng':[0,ls]}
# loop until opt.n criterion is fulfilled
# increasing energy threshold up to 1
while e_rel < 1:
# pause [on off], pause index
t=np.asarray([])
j=0
# [e_y/e_rw] indices as in t
        e_ratio=np.asarray([])
import os
import numpy as np
from random import shuffle
from collections import namedtuple
from glob import glob
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tf2_module import build_generator, build_discriminator_classifier, softmax_criterion
from tf2_utils import get_now_datetime, save_midis
class Classifier(object):
def __init__(self, args):
self.dataset_A_dir = args.dataset_A_dir
self.dataset_B_dir = args.dataset_B_dir
self.sample_dir = args.sample_dir
self.batch_size = args.batch_size
self.time_step = args.time_step
self.pitch_range = args.pitch_range
self.input_c_dim = args.input_nc # number of input image channels
self.sigma_c = args.sigma_c
self.sigma_d = args.sigma_d
self.lr = args.lr
self.model = args.model
self.generator = build_generator
self.discriminator = build_discriminator_classifier
OPTIONS = namedtuple('OPTIONS', 'batch_size '
'time_step '
'input_nc '
'output_nc '
'pitch_range '
'gf_dim '
'df_dim '
'is_training')
self.options = OPTIONS._make((args.batch_size,
args.time_step,
args.input_nc,
args.output_nc,
args.pitch_range,
args.ngf,
args.ndf,
args.phase == 'train'))
self.now_datetime = get_now_datetime()
self._build_model(args)
print("Initializing classifier...")
def _build_model(self, args):
# build classifier
self.classifier = self.discriminator(self.options,
name='Classifier')
# optimizer
self.classifier_optimizer = Adam(self.lr,
beta_1=args.beta1)
# checkpoints
model_name = "classifier.model"
model_dir = "classifier_{}2{}_{}_{}".format(self.dataset_A_dir,
self.dataset_B_dir,
self.now_datetime,
str(self.sigma_c))
self.checkpoint_dir = os.path.join(args.checkpoint_dir,
model_dir,
model_name)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
self.checkpoint = tf.train.Checkpoint(classifier_optimizer=self.classifier_optimizer,
classifier=self.classifier)
self.checkpoint_manager = tf.train.CheckpointManager(self.checkpoint,
self.checkpoint_dir,
max_to_keep=5)
def train(self, args):
# create training list (origin data with corresponding label)
# Label for A is (1, 0), for B is (0, 1)
dataA = glob('./datasets/{}/train/*.*'.format(self.dataset_A_dir))
dataB = glob('./datasets/{}/train/*.*'.format(self.dataset_B_dir))
labelA = [(1.0, 0.0) for _ in range(len(dataA))]
labelB = [(0.0, 1.0) for _ in range(len(dataB))]
data_origin = dataA + dataB
label_origin = labelA + labelB
training_list = [pair for pair in zip(data_origin, label_origin)]
        print('Successfully created training list!')
# create test list (origin data with corresponding label)
dataA = glob('./datasets/{}/test/*.*'.format(self.dataset_A_dir))
dataB = glob('./datasets/{}/test/*.*'.format(self.dataset_B_dir))
labelA = [(1.0, 0.0) for _ in range(len(dataA))]
labelB = [(0.0, 1.0) for _ in range(len(dataB))]
data_origin = dataA + dataB
label_origin = labelA + labelB
testing_list = [pair for pair in zip(data_origin, label_origin)]
        print('Successfully created testing list!')
data_test = [np.load(pair[0]) * 2. - 1. for pair in testing_list]
data_test = np.array(data_test).astype(np.float32)
gaussian_noise = np.random.normal(0,
self.sigma_c,
[data_test.shape[0],
data_test.shape[1],
data_test.shape[2],
data_test.shape[3]])
data_test += gaussian_noise
label_test = [pair[1] for pair in testing_list]
label_test = np.array(label_test).astype(np.float32).reshape(len(label_test), 2)
if args.continue_train:
if self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint):
print(" [*] Load checkpoint succeeded!")
else:
print(" [!] Load checkpoint failed...")
counter = 1
for epoch in range(args.epoch):
# shuffle the training samples
shuffle(training_list)
# get the correct batch number
batch_idx = len(training_list) // self.batch_size
# learning rate would decay after certain epochs
self.lr = self.lr if epoch < args.epoch_step else self.lr * (args.epoch-epoch) / (args.epoch-args.epoch_step)
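            # note (hedged): reassigning self.lr only updates the Python attribute;
            # for the decay to reach Adam one would also need something like
            # self.classifier_optimizer.learning_rate.assign(self.lr).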
for idx in range(batch_idx):
# data samples in batch
batch = training_list[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_data = [np.load(pair[0]) * 2. - 1. for pair in batch]
batch_data = np.array(batch_data).astype(np.float32)
# data labels in batch
batch_label = [pair[1] for pair in batch]
batch_label = np.array(batch_label).astype(np.float32).reshape(len(batch_label), 2)
with tf.GradientTape(persistent=True) as tape:
# Origin samples passed through the classifier
origin = self.classifier(batch_data,
training=True)
test = self.classifier(data_test,
training=True)
# loss
loss = softmax_criterion(origin, batch_label)
# test accuracy
test_softmax = tf.nn.softmax(test)
test_prediction = tf.equal(tf.argmax(test_softmax, 1), tf.argmax(label_test, 1))
test_accuracy = tf.reduce_mean(tf.cast(test_prediction, tf.float32))
# calculate gradients
classifier_gradients = tape.gradient(target=loss,
sources=self.classifier.trainable_variables)
# apply gradients to the optimizer
self.classifier_optimizer.apply_gradients(zip(classifier_gradients,
self.classifier.trainable_variables))
if idx % 100 == 0:
print('=================================================================')
print(("Epoch: [%2d] [%4d/%4d] loss: %6.2f, accuracy: %6.2f" %
(epoch, idx, batch_idx, loss, test_accuracy)))
counter += 1
print('=================================================================')
print(("Epoch: [%2d] loss: %6.2f, accuracy: %6.2f" % (epoch, loss, test_accuracy)))
# save the checkpoint per epoch
self.checkpoint_manager.save(epoch)
def test(self, args):
        # load the origin samples in npy format, sorted in ascending order
sample_files_origin = glob('./test/{}2{}_{}_{}_{}/{}/npy/origin/*.*'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.model,
self.sigma_d,
self.now_datetime,
args.which_direction))
sample_files_origin.sort(key=lambda x: int(os.path.splitext(os.path.basename(x))[0].split('_')[0]))
        # load the transfer samples in npy format, sorted in ascending order
sample_files_transfer = glob('./test/{}2{}_{}_{}_{}/{}/npy/transfer/*.*'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.model,
self.sigma_d,
self.now_datetime,
args.which_direction))
sample_files_transfer.sort(key=lambda x: int(os.path.splitext(os.path.basename(x))[0].split('_')[0]))
        # load the cycle samples in npy format, sorted in ascending order
sample_files_cycle = glob('./test/{}2{}_{}_{}_{}/{}/npy/cycle/*.*'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.model,
self.sigma_d,
self.now_datetime,
args.which_direction))
sample_files_cycle.sort(key=lambda x: int(os.path.splitext(os.path.basename(x))[0].split('_')[0]))
# put the origin, transfer and cycle of the same phrase in one zip
sample_files = list(zip(sample_files_origin,
sample_files_transfer,
sample_files_cycle))
if self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint):
print(" [*] Load checkpoint succeeded!")
else:
print(" [!] Load checkpoint failed...")
# create a test path to store the generated sample midi files attached with probability
test_dir_mid = os.path.join(args.test_dir, '{}2{}_{}_{}_{}/{}/mid_attach_prob'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.model,
self.sigma_d,
self.now_datetime,
args.which_direction))
if not os.path.exists(test_dir_mid):
os.makedirs(test_dir_mid)
count_origin = 0
count_transfer = 0
count_cycle = 0
line_list = []
for idx in range(len(sample_files)):
print('Classifying midi: ', sample_files[idx])
# load sample phrases in npy formats
origin = np.load(sample_files[idx][0])
transfer = np.load(sample_files[idx][1])
cycle = np.load(sample_files[idx][2])
# get the probability for each sample phrase
origin_softmax = tf.nn.softmax(self.classifier(origin * 2. - 1.,
training=False))
transfer_softmax = tf.nn.softmax(self.classifier(transfer * 2. - 1.,
training=False))
cycle_softmax = tf.nn.softmax(self.classifier(cycle * 2. - 1.,
training=False))
origin_transfer_diff = np.abs(origin_softmax - transfer_softmax)
content_diff = np.mean((origin * 1.0 - transfer * 1.0) ** 2)
# labels: (1, 0) for A, (0, 1) for B
if args.which_direction == 'AtoB':
line_list.append((idx + 1,
content_diff,
origin_transfer_diff[0][0],
origin_softmax[0][0],
transfer_softmax[0][0],
cycle_softmax[0][0]))
# for the accuracy calculation
count_origin += 1 if np.argmax(origin_softmax[0]) == 0 else 0
count_transfer += 1 if np.argmax(transfer_softmax[0]) == 0 else 0
count_cycle += 1 if np.argmax(cycle_softmax[0]) == 0 else 0
# create paths for origin, transfer and cycle samples attached with probability
path_origin = os.path.join(test_dir_mid, '{}_origin_{}.mid'.format(idx + 1,
origin_softmax[0][0]))
path_transfer = os.path.join(test_dir_mid, '{}_transfer_{}.mid'.format(idx + 1,
transfer_softmax[0][0]))
path_cycle = os.path.join(test_dir_mid, '{}_cycle_{}.mid'.format(idx + 1,
cycle_softmax[0][0]))
else:
line_list.append((idx + 1,
content_diff,
origin_transfer_diff[0][1],
origin_softmax[0][1],
transfer_softmax[0][1],
cycle_softmax[0][1]))
# for the accuracy calculation
                count_origin += 1 if np.argmax(origin_softmax[0]) == 1 else 0
import math
from typing import List
import pytest
import numpy as np
from flaky import flaky
from fiesta_test import model_generator, split_data
from fiesta.util import pull_arm, belief_calc, lists_same_size, fc_func_stats
from fiesta.util import fb_func_stats
@pytest.mark.parametrize("mean", (2, 3, 10, 0.7))
@pytest.mark.parametrize("sd", (0.2, 0.31, 1.4))
def test_pull_arm(mean, sd):
values: List[float] = []
for _ in range(0, 10):
values.append(pull_arm(mean, sd))
    # values drawn from a normal distribution should be within 3 standard
    # deviations of the mean 99.7% of the time, so 10 samples are expected
    # to produce ~0.03 outliers; allow at most 1.
not_in_limit = 0
sd_3 = 3 * sd
upper_limit = mean + sd_3
lower_limit = mean - sd_3
for value in values:
if value > upper_limit or value < lower_limit:
print(value)
print(upper_limit)
print(lower_limit)
print(mean)
print(sd)
print(sd_3)
not_in_limit += 1
assert not_in_limit < 2
def test_belief_calc():
# Case where there are 3 very distinct models
    est_means = np.array([10, 3, 8])
import pickle
import numpy as np
import math
from numba import jit
sr=500 # Hz
win=1 # seconds
step=0.5 # seconds
nsta = 69 # number of stations
with open('ptraveltimes_table.p' , 'rb') as f:
comein=pickle.load(f)
ptraveltimes = comein[0]
with open('straveltimes_table.p' , 'rb') as f:
comein=pickle.load(f)
straveltimes = comein[0]
st=[]
n=0
for i in range(0,12420):
with open('./H3/data/trace' + str(n) + '.p', 'rb') as f:
comein=pickle.load(f)
st.append(comein[0])
n=n+1
##############################################################
# vn and hn are normalized seismograms
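# each trace is demeaned, rectified, normalized by its median (dead or flat
# channels fall back to all-ones), and amplitude-compressed by a cube root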
vn=[]
for i in range(0,69):
vv=[]
for j in range(0,60):
data = st[j*69*3+i].data
data = data-np.mean(data)
data = np.abs(data)
if math.isnan(np.median(data)) == True or np.median(data) == 0:
data = np.ones(len(data))
else:
data = data/np.median(data)
data = data ** (1/3)
vv.append(data)
vn.append(np.concatenate(vv))
hn = []
for i,ii in zip(range(69, 69*2),range(69*2, 69*3)):
vv=[]
for j in range(0, 60):
data1 = st[j * 69 * 3 + i].data
        data1 = data1 - np.mean(data1)
import numpy as np
import matplotlib.pyplot as plt
with open("param_1.5.3_2D.txt", "r") as f:
line = f.readline()
line = line.split()
Nx, Nz = int(line[0]), int(line[1])
line = f.readline()
line = line.split()
Lx, Lz = float(line[0]), float(line[1])
print(Nx, Nz, Lx, Lz)
n_cores = 4
tempo_vec = []
topo_vec = []
for cont in range(0, 10000, 10):
try:
if n_cores > 1:
tempo = np.loadtxt(
"time_" + str(cont) + ".txt",
unpack=True,
delimiter=":",
usecols=(1),
)
tempo = tempo[0]
else:
tempo = np.loadtxt(
"time_" + str(cont) + ".txt", unpack=True, delimiter=":"
)
    except OSError:
print("Step %d not found" % (cont))
break
x = []
z = []
c = []
s = []
for rank in range(n_cores):
x1, z1, c0, c1, c2 = np.loadtxt(
"step_" + str(cont) + "_" + str(rank) + ".txt", unpack=True
)
cor = (0, 0, 0)
cor2 = (0, 0, 0)
cor3 = (0, 0, 0)
# print(cor)
c = np.append(c, c0)
x = np.append(x, x1)
z = np.append(z, z1)
s = np.append(s, c1)
    # compute the mask once; filtering c before using it to index s would
    # apply a mask of the wrong length
    keep = c > 9999
    x = x[keep]
    z = z[keep]
    s = s[keep]
    c = c[keep]
if cont == 0:
condition = (z > -155.0e3) & (z < -150.0e3)
else:
condition = (s == 3) & (z > -170.0e3) & (z < -140.0e3)
xx = x[condition]
zz = z[condition]
cc = c[condition]
    ic = np.argsort(cc)
import numpy as np
from gurobipy import *
import time
import os
import sys
from Utilities import CalcRhoAndBetaVectors
from UtilitiesOptimization import CalculateLPGurobi, CalculateQuadGurobi,\
SubgrAlgSavPrimDualObjInd, SubgrAlgSavPrimDualObjFn_L2Ind, ExtendSizeCamps, OptimalBids, OptimalX
#
#
## Simulation Code
## To make the simulation code faster and easier to read (in particular the greedy heuristic)
## we would like to change the vector of click-through rates 'vector_ctr', the vector of
## revenues 'vector_q', the vector of revenue times click-through rate 'vector_qctr', and others
## into matrices of size number of campaigns times number of impressions.
## That's done in the following function.
def CreateMatR_ctr_Rctr_Rhoctr(numCampaigns, num_impressions, num_edges, \
index_Imps, index_sizeCamps, vector_q, vector_ctr, \
vector_qctr, PPFTable, numericBeta):
## mat_r_by_Imp is a matrix in which each column 'i'
## represents the valuations of all campaigns in order for
## an impression of type 'i'. If the campaign is not interested
## in the impression a zero value is entered in that position.
mat_r_by_Imp=np.zeros((numCampaigns, num_impressions))
mat_ctr=np.zeros((numCampaigns, num_impressions))
mat_rctr_by_Imp=np.zeros((numCampaigns, num_impressions))
mat_rhoctr_by_Imp=np.zeros((numCampaigns, num_impressions))
mat_rctrBetarho_by_Imp=np.zeros((numCampaigns, num_impressions))
[rho_rctr, beta_rctr]=CalcRhoAndBetaVectors(vector_qctr, num_edges, \
index_Imps, PPFTable, numericBeta)
for i in range(num_impressions):
count=0
aux=0
indexes=np.arange(num_edges)[(index_Imps==i)]
sizeIndexes=len(indexes)
if(sizeIndexes!=0):
pos=indexes[aux]
for j in range(numCampaigns):
impInCamp=index_sizeCamps[j]
if (pos<(count+impInCamp)):
mat_r_by_Imp[j, i]=vector_q[pos]
mat_ctr[j, i]=vector_ctr[pos]
mat_rctr_by_Imp[j, i]=vector_qctr[pos]
mat_rhoctr_by_Imp[j, i]=rho_rctr[pos]
mat_rctrBetarho_by_Imp[j, i] =(vector_qctr[pos]-\
beta_rctr[pos])*rho_rctr[pos]
if(aux<sizeIndexes-1):
aux+=1
pos=indexes[aux]
else:
# No more campaigns use that impression
pos=num_edges
count+=impInCamp
return [mat_r_by_Imp, mat_ctr, mat_rctr_by_Imp, mat_rhoctr_by_Imp, \
mat_rctrBetarho_by_Imp]
# ### Greedy Heuristic Procedure
# When the greedy heuristic has the opportunity to bid for a given impression type
# it first checks the budgets to see which of the interested campaigns have enough
# money to pay in case a click is done and then it bids for the campaign
# that maximizes the profit. Given that Ipinyou assumes second price auctions,
# the greedy heuristic bids for the campaign with the highest revenue times ctr
# that still has enough money to pay for the impression in case of winning.
# 'CreateMatrixBidAndX' transforms bid and allocation vectors into matrices. This code
# will be used by all methods in the simulation step, as we will obtain bidding and
# allocation vectors for Indicator and Indicator + $\ell_2$ once we run our primal-dual
# methodology, and the greedy step has bidding prices equal to $r_{ik}$. Given that we run
# our primal-dual methodology only once per simulation (which is clearly sub-optimal), the
# allocation vector is enough to decide on behalf of which campaign to bid
# for a whole simulation.
def CreateMatrixBidAndX(numCampaigns, num_impressions, num_edges, \
index_Imps, index_sizeCamps, bid_vector, x):
mat_bid_by_Imp=np.zeros((numCampaigns, num_impressions))
mat_x_by_Imp=np.zeros((numCampaigns, num_impressions))
for i in range(num_impressions):
count=0
aux=0
indexes=np.arange(num_edges)[(index_Imps==i)]
sizeIndexes=len(indexes)
if(sizeIndexes!=0):
pos=indexes[aux]
for j in range(numCampaigns):
impInCamp=index_sizeCamps[j]
if (pos<(count+impInCamp)):
mat_bid_by_Imp[j, i]=bid_vector[pos]
mat_x_by_Imp[j, i]=x[pos]
if(aux<sizeIndexes-1):
aux+=1
pos=indexes[aux]
else:
# No more campaigns use that impression
# This should be done with a while.
pos=num_edges
count+=impInCamp
return [mat_bid_by_Imp, mat_x_by_Imp]
# For each impression type $i$, the probability of not bidding for it is
# $$1-\sum_{k \in \mathcal{K}_i} x_{ik}$$ We obtain the vector of probabilities of bidding
# for each impression type in CreateProbOfBidding. In case we decide to bid for a given
# impression type, FastRandomChoice helps us decide which campaign to bid on behalf of.
# It receives as inputs the vector 'condProbVector', which represents the probability of bidding
# on behalf of each campaign that has enough budget to bid (this vector's entries are
# non-negative and sum up to one), and a number 'unif_value', which we assume was sampled
# from a uniform random variable. Then, it uses a standard inverse-CDF trick to decide on
# behalf of which campaign to bid. The campaign number it returns is relative to the
# campaigns that have enough budget to bid on behalf of.
def CreateProbOfBidding(mat_x_by_Imp):
return np.sum(mat_x_by_Imp, axis=0)
def FastRandomChoice(condProbVector, unif_value):
    auxPartSum=0.0
    for i in range(len(condProbVector)):
        # return the first index whose cumulative probability reaches unif_value
        auxPartSum+=condProbVector[i]
        if unif_value<=auxPartSum:
            return i
    return len(condProbVector)-1
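# Usage sketch (inverse-CDF sampling): for condProbVector=[0.2, 0.5, 0.3] the
# cumulative sums are 0.2, 0.7, 1.0, so unif_value=0.1 selects index 0,
# 0.65 selects index 1, and 0.9 selects index 2.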
### Initializing data for each method (Greedy and derived from our method)
# When we run a simulation we would like to save, by campaign, the number of bids made,
# won, and clicked for each impression type. That info is saved in cartBids, cartWon,
# and cartClicked resp. Also, as general statistics, we would like to know the cost, revenue,
# and profit each impression type brought for the DSP. That info is saved in costBids, revenue,
# and profit resp.
# Function CreateDataForSimulation creates all the data needed to start the simulation for the
# Greedy and a non-Greedy method.
def CreateIndicatorSimulation(numCampaigns, num_impressions, vector_m):
budget=np.zeros(numCampaigns)
budget[:]=vector_m
cartBids=np.zeros((numCampaigns, num_impressions))
cartWon=np.zeros((numCampaigns, num_impressions))
cartClicked=np.zeros((numCampaigns, num_impressions))
costBids=np.zeros(num_impressions)
revenue=np.zeros(num_impressions)
profit=np.zeros(num_impressions)
return [budget, cartBids, cartWon, cartClicked, \
costBids, revenue, profit]
def CreateDataForSimulation(bidFound, xFound, numCampaigns, \
num_impressions, num_edges, index_Imps, index_sizeCamps, vector_q, \
vector_ctr, vector_qctr, vector_m, PPFTable, numericBeta):
[budgetLR, cartBidsLR, cartWonLR, cartClickedLR, costBidsLR, revenueLR, \
profitLR]=CreateIndicatorSimulation(numCampaigns, \
num_impressions, vector_m)
[budgetGr, cartBidsGr, cartWonGr, cartClickedGr, costBidsGr, revenueGr, \
profitGr]=CreateIndicatorSimulation(numCampaigns, \
num_impressions, vector_m)
[mat_r_by_Imp, mat_ctrTest, mat_rctr_by_Imp, mat_rhoctr_by_Imp, mat_rctrBetarho_by_Imp]=\
CreateMatR_ctr_Rctr_Rhoctr(numCampaigns, num_impressions, num_edges, \
index_Imps, index_sizeCamps, vector_q, vector_ctr, vector_qctr, PPFTable, numericBeta)
[mat_bid_by_ImpLR, mat_x_by_ImpLR]=CreateMatrixBidAndX(numCampaigns, \
num_impressions, num_edges, index_Imps, index_sizeCamps, \
bidFound, xFound)
probBidLR=CreateProbOfBidding(mat_x_by_ImpLR)
return [budgetLR, cartBidsLR, cartWonLR, cartClickedLR, costBidsLR, \
revenueLR, profitLR, budgetGr, cartBidsGr, cartWonGr, \
cartClickedGr, costBidsGr, revenueGr, profitGr, mat_r_by_Imp, \
mat_ctrTest, mat_rctr_by_Imp, mat_rhoctr_by_Imp, mat_rctrBetarho_by_Imp, \
mat_bid_by_ImpLR, mat_x_by_ImpLR, probBidLR]
# Comments About the Implementation
# - We win an auction only if the bid_amount is higher than the market price that appear in the Ipinyou Log.
# In case of winning the auction we then need to check if a click occurs. We update the revenue, profit,
# budget, cartWon, costBids, and cartClicked accordingly.
# - For the indicator and indicator+$\ell_2$ case we only need to check the allocation vector to decide
# the campaign to bid in behalf of (allocation vector that comes from running other primal-dual procedure).
# Simulation code for Indicator, Indicator + $\ell_2$, and Greedy
def RunIndL2IndAndGreedy(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_m, vector_ctrTrain, vector_rctrTrain, \
vector_ctrTest, vector_rctrTest, bidsInd, xInd, bidsL2Ind, xL2Ind, tau, ImpInOrder, MPInOrder,\
impNames, listCampPerImp):
## We first initialize the budgets used, matrices of bids made, won, and clicked for
## three methods.
[budgetInd, cartBidsInd, cartWonInd, cartClickedInd, costBidsInd, \
revenueInd, profitInd, _, _, _, _, _, _, _, _, _, mat_rctr_by_ImpTrain, _, _, \
mat_bid_by_ImpInd, mat_x_by_ImpInd, probBidInd]=CreateDataForSimulation(bidsInd, \
xInd, numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps, \
vector_q, vector_ctrTrain, vector_rctrTrain, vector_m, PPFTable, numericBeta)
[budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, cartClickedL2Ind, costBidsL2Ind, \
revenueL2Ind, profitL2Ind, budgetGr, cartBidsGr, cartWonGr, \
cartClickedGr, costBidsGr, revenueGr, profitGr, mat_r_by_Imp, \
mat_ctrTest, _, _, _, \
mat_bid_by_ImpL2Ind, mat_x_by_ImpL2Ind, probBidL2Ind]=CreateDataForSimulation(bidsL2Ind, \
xL2Ind, numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps, \
vector_q, vector_ctrTest, vector_rctrTest, vector_m, PPFTable, numericBeta)
## Now we simulate
# campaignsArange=np.arange(numCampaigns)
    ## Instead of calling np.random.uniform every time we need a random uniform, we call
    ## the method once at the beginning of the simulation and save all the uniform
    ## samples we need.
allUnifToUse = np.random.uniform(0.0, 1.0, (len(ImpInOrder)*3))
# uniOnline=False
    ## We read the test log in order of how the impression types appear.
for i,clusterId in enumerate(ImpInOrder):
impType=impNames.index(clusterId)
unifs=allUnifToUse[(3*i):(3*(i+1))]
## Market Price that appears in the test log.
mp_value=MPInOrder[i]
## Update Ind
indBuyerInd=0
tryToBidInd=False
bidAmountInd=0.0
## First we check if the method would try to bid for the impression
## or would just discard it immediately
if unifs[0] <= probBidInd[impType]:
## For each campaign we check if there is any that has enough budget to bid and that
## also wants to do so.
bidUsingInd=False
# print('budgetInd[listCampPerImp[impType]]: '+str(budgetInd[listCampPerImp[impType]]))
# aux53 =(mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetInd[listCampPerImp[impType]])
indInterested = (mat_x_by_ImpInd[listCampPerImp[impType],impType]>0) *\
(mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetInd[listCampPerImp[impType]])
if np.sum(indInterested) >0:
bidUsingInd= True
if bidUsingInd:
## There is at least one campaign that wants to bid.
posInt=listCampPerImp[impType][indInterested]
## Conditional probability assuming that the method is going to bid.
## This conditional probability excludes all those campaigns
## that do not want to bid
condProbInterested=mat_x_by_ImpInd[posInt, impType]
condProbInterested*=1.0/np.sum(condProbInterested)
auxPartSum=0.0
                ## Now we will choose on behalf of which campaign to bid.
numInterest = len(condProbInterested)
auxPosForindBuyerInd = numInterest-1
z = 0
while z<numInterest:
auxPartSum += condProbInterested[z]
if auxPartSum >= unifs[1]:
## If we exceed unifs[1] go out of the loop
auxPosForindBuyerInd=z
z+=numInterest
z += 1
indBuyerInd=posInt[auxPosForindBuyerInd]
tryToBidInd=True
bidAmountInd=mat_bid_by_ImpInd[indBuyerInd, impType]
        ## If tryToBidInd == True, we will try to bid on behalf of campaign indBuyerInd,
        ## bidding an amount of bidAmountInd.
if(tryToBidInd):
## We first register that we are bidding on behalf of indBuyerInd for an
## impression of type impType
cartBidsInd[indBuyerInd, impType]+=1
## We win the auction only if the value we are bidding is higher
## than the market price observed by Ipinyou
if bidAmountInd>= mp_value:
## Impression Won. Register that we won the impression and the change
## in cost and profit.
cartWonInd[indBuyerInd, impType]+=1
costBidsInd[impType]-=mp_value
profitInd[impType]-=mp_value
# Now we need to check if the ad was clicked.
probOfClick=mat_ctrTest[indBuyerInd, impType]
if (unifs[2]<=probOfClick):
## User clicked, increase revenue and charge the campaign (i.e. DSP wins money).
cartClickedInd[indBuyerInd, impType]+=1
payment=mat_r_by_Imp[indBuyerInd, impType]
revenueInd[impType]+=payment
profitInd[impType]+=payment
budgetInd[indBuyerInd]-=payment
## Update L2Ind (Same code as done before for the pure indicator case)
indBuyerL2Ind=0
tryToBidL2Ind=False
bidAmountL2Ind=0.0
if unifs[0] <= probBidL2Ind[impType]:
## For each campaign we check if there is any that has enough budget to bid and that
## also wants to do so.
bidUsingL2Ind=False
indInterested =\
(mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetL2Ind[listCampPerImp[impType]]) * \
(mat_x_by_ImpL2Ind[listCampPerImp[impType],impType]>0)
if np.sum(indInterested) >0:
bidUsingL2Ind= True
if bidUsingL2Ind:
## There is at least one campaign that wants to bid.
posInt=listCampPerImp[impType][indInterested]
## Conditional probability assuming that the method is going to bid.
## This conditional probability excludes all those campaigns
## that do not want to bid
condProbInterested=mat_x_by_ImpL2Ind[posInt, impType]
condProbInterested*=1.0/np.sum(condProbInterested)
auxPartSum=0.0
                ## Now we will choose on behalf of which campaign to bid.
numInterest = len(condProbInterested)
auxPosForindBuyerL2Ind = numInterest-1
z = 0
while z <numInterest:
auxPartSum += condProbInterested[z]
if auxPartSum >= unifs[1]:
## If we exceed unifs[1] go out of the loop
auxPosForindBuyerL2Ind=z
z+=numInterest
z += 1
indBuyerL2Ind=posInt[auxPosForindBuyerL2Ind]
tryToBidL2Ind=True
bidAmountL2Ind=mat_bid_by_ImpL2Ind[indBuyerL2Ind, impType]
if(tryToBidL2Ind):
cartBidsL2Ind[indBuyerL2Ind, impType]+=1
if bidAmountL2Ind>= mp_value:
## Impression Won.
cartWonL2Ind[indBuyerL2Ind, impType]+=1
costBidsL2Ind[impType]-=mp_value
profitL2Ind[impType]-=mp_value
# Now we need to check if the ad was clicked.
probOfClick=mat_ctrTest[indBuyerL2Ind, impType]
if (unifs[2]<=probOfClick):
## User clicked, increase revenue and charge the campaign.
cartClickedL2Ind[indBuyerL2Ind, impType]+=1
payment=mat_r_by_Imp[indBuyerL2Ind, impType]
revenueL2Ind[impType]+=payment
profitL2Ind[impType]+=payment
budgetL2Ind[indBuyerL2Ind]-=payment
### Now we update the Greedy Policy
        ## The greedy heuristic considers the campaigns that still have remaining
        ## budget and bids on behalf of the one with the highest r times ctr.
        ## The previous holds since Ipinyou assumes second price auctions.
indBuyerGr=-1
bidAmountGr=0.0
tryToBidGr=False
indInterested =\
mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetGr[listCampPerImp[impType]]
if np.sum(indInterested) > 0:
posInt=listCampPerImp[impType][indInterested]
indBuyerGr = posInt[np.argmax(mat_rctr_by_ImpTrain[posInt,impType])]
bidAmountGr=mat_rctr_by_ImpTrain[indBuyerGr, impType]
tryToBidGr=True
        ## If tryToBidGr == True, we will bid on behalf of campaign 'indBuyerGr'
        ## the amount 'bidAmountGr'
if (tryToBidGr):
            ## Save that we are bidding on behalf of 'indBuyerGr' for an impression of
            ## type 'impType'
cartBidsGr[indBuyerGr, impType]+=1
## We win the auction only if the value we are bidding is higher
## than the market price observed by Ipinyou.
if bidAmountGr>= mp_value:
## Impression Won.
cartWonGr[indBuyerGr, impType]+=1
costBidsGr[impType]-=mp_value
profitGr[impType]-=mp_value
# Now we need to check if the ad was clicked.
probOfClick=mat_ctrTest[indBuyerGr, impType]
if (unifs[2]<=probOfClick):
## User clicked, increase revenue and charge the campaign.
cartClickedGr[indBuyerGr, impType]+=1
payment=mat_r_by_Imp[indBuyerGr, impType]
revenueGr[impType]+=payment
profitGr[impType]+=payment
budgetGr[indBuyerGr]-=payment
return [budgetInd, cartBidsInd, cartWonInd, cartClickedInd, costBidsInd, \
revenueInd, profitInd, budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, \
cartClickedL2Ind, costBidsL2Ind, revenueL2Ind, profitL2Ind, budgetGr, \
cartBidsGr, cartWonGr, cartClickedGr, costBidsGr, revenueGr, profitGr]
def RunInd_L2_L2Ind_Greedy(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_m, vector_ctrTrain, vector_rctrTrain, \
vector_ctrTest, vector_rctrTest, bidsInd, xInd, bidsL2, xL2, bidsL2Ind, xL2Ind, tau,\
ImpInOrder, MPInOrder, impNames, listCampPerImp):
## We first initialize the budgets used, matrices of bids made, won, and clicked for
## three methods.
[budgetInd, cartBidsInd, cartWonInd, cartClickedInd, costBidsInd, \
revenueInd, profitInd, _, _, _, _, _, _, _, _, _, mat_rctr_by_ImpTrain, _, _, \
mat_bid_by_ImpInd, mat_x_by_ImpInd, probBidInd]=CreateDataForSimulation(bidsInd, \
xInd, numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps, \
vector_q, vector_ctrTrain, vector_rctrTrain, vector_m, PPFTable, numericBeta)
[budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, cartClickedL2Ind, costBidsL2Ind, \
revenueL2Ind, profitL2Ind, budgetGr, cartBidsGr, cartWonGr, \
cartClickedGr, costBidsGr, revenueGr, profitGr, mat_r_by_Imp, \
mat_ctrTest, _, _, _, \
mat_bid_by_ImpL2Ind, mat_x_by_ImpL2Ind, probBidL2Ind]=CreateDataForSimulation(bidsL2Ind, \
xL2Ind, numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps, \
vector_q, vector_ctrTest, vector_rctrTest, vector_m, PPFTable, numericBeta)
[budgetL2, cartBidsL2, cartWonL2, cartClickedL2, costBidsL2, \
revenueL2, profitL2, _, _, _, _, _, _, _, _, _, _, _, _, \
mat_bid_by_ImpL2, mat_x_by_ImpL2, probBidL2]=CreateDataForSimulation(bidsL2, \
xL2, numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps, \
vector_q, vector_ctrTest, vector_rctrTest, vector_m, PPFTable, numericBeta)
## Now we simulate
# campaignsArange=np.arange(numCampaigns)
    ## Instead of calling np.random.uniform every time we need a random uniform, we call
    ## the method once at the beginning of the simulation and save all the uniform
    ## samples we need.
allUnifToUse = np.random.uniform(0.0, 1.0, (len(ImpInOrder)*3))
# uniOnline=False
    ## We read the test log in order of how the impression types appear.
for i,clusterId in enumerate(ImpInOrder):
impType=impNames.index(clusterId)
unifs=allUnifToUse[(3*i):(3*(i+1))]
## Market Price that appears in the test log.
mp_value=MPInOrder[i]
## Update Ind
indBuyerInd=0
tryToBidInd=False
bidAmountInd=0.0
## First we check if the method would try to bid for the impression
## or would just discard it immediately
if unifs[0] <= probBidInd[impType]:
## For each campaign we check if there is any that has enough budget to bid and that
## also wants to do so.
bidUsingInd=False
# print('budgetInd[listCampPerImp[impType]]: '+str(budgetInd[listCampPerImp[impType]]))
# aux53 =(mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetInd[listCampPerImp[impType]])
indInterested = (mat_x_by_ImpInd[listCampPerImp[impType],impType]>0) *\
(mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetInd[listCampPerImp[impType]])
if np.sum(indInterested) >0:
bidUsingInd= True
if bidUsingInd:
## There is at least one campaign that wants to bid.
posInt=listCampPerImp[impType][indInterested]
## Conditional probability assuming that the method is going to bid.
## This conditional probability excludes all those campaigns
## that do not want to bid
condProbInterested=mat_x_by_ImpInd[posInt, impType]
condProbInterested*=1.0/np.sum(condProbInterested)
auxPartSum=0.0
                ## Now we will choose on behalf of which campaign to bid.
numInterest = len(condProbInterested)
auxPosForindBuyerInd = numInterest-1
z = 0
while z<numInterest:
auxPartSum += condProbInterested[z]
if auxPartSum >= unifs[1]:
## If we exceed unifs[1] go out of the loop
auxPosForindBuyerInd=z
z+=numInterest
z += 1
indBuyerInd=posInt[auxPosForindBuyerInd]
tryToBidInd=True
bidAmountInd=mat_bid_by_ImpInd[indBuyerInd, impType]
        ## If tryToBidInd == True, we will try to bid on behalf of campaign indBuyerInd,
        ## bidding an amount of bidAmountInd.
if(tryToBidInd):
## We first register that we are bidding on behalf of indBuyerInd for an
## impression of type impType
cartBidsInd[indBuyerInd, impType]+=1
## We win the auction only if the value we are bidding is higher
## than the market price observed by Ipinyou
if bidAmountInd>= mp_value:
## Impression Won. Register that we won the impression and the change
## in cost and profit.
cartWonInd[indBuyerInd, impType]+=1
costBidsInd[impType]-=mp_value
profitInd[impType]-=mp_value
# Now we need to check if the ad was clicked.
probOfClick=mat_ctrTest[indBuyerInd, impType]
if (unifs[2]<=probOfClick):
## User clicked, increase revenue and charge the campaign (i.e. DSP wins money).
cartClickedInd[indBuyerInd, impType]+=1
payment=mat_r_by_Imp[indBuyerInd, impType]
revenueInd[impType]+=payment
profitInd[impType]+=payment
budgetInd[indBuyerInd]-=payment
## Update L2 (Same code as done before for the pure indicator case)
indBuyerL2=0
tryToBidL2=False
bidAmountL2=0.0
if unifs[0] <= probBidL2[impType]:
## For each campaign we check if there is any that has enough budget to bid and that
## also wants to do so.
bidUsingL2=False
indInterested =\
(mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetL2[listCampPerImp[impType]]) * \
(mat_x_by_ImpL2[listCampPerImp[impType],impType]>0)
if np.sum(indInterested) >0:
bidUsingL2= True
if bidUsingL2:
## There is at least one campaign that wants to bid.
posInt=listCampPerImp[impType][indInterested]
## Conditional probability assuming that the method is going to bid.
## This conditional probability excludes all those campaigns
## that do not want to bid
condProbInterested=mat_x_by_ImpL2[posInt, impType]
condProbInterested*=1.0/np.sum(condProbInterested)
auxPartSum=0.0
                ## Now we will choose on behalf of which campaign to bid.
numInterest = len(condProbInterested)
auxPosForindBuyerL2 = numInterest-1
z = 0
while z <numInterest:
auxPartSum += condProbInterested[z]
if auxPartSum >= unifs[1]:
## If we exceed unifs[1] go out of the loop
auxPosForindBuyerL2=z
z+=numInterest
z += 1
indBuyerL2=posInt[auxPosForindBuyerL2]
tryToBidL2=True
bidAmountL2=mat_bid_by_ImpL2[indBuyerL2, impType]
if(tryToBidL2):
cartBidsL2[indBuyerL2, impType]+=1
if bidAmountL2>= mp_value:
## Impression Won.
cartWonL2[indBuyerL2, impType]+=1
costBidsL2[impType]-=mp_value
profitL2[impType]-=mp_value
# Now we need to check if the ad was clicked.
probOfClick=mat_ctrTest[indBuyerL2, impType]
if (unifs[2]<=probOfClick):
## User clicked, increase revenue and charge the campaign.
cartClickedL2[indBuyerL2, impType]+=1
payment=mat_r_by_Imp[indBuyerL2, impType]
revenueL2[impType]+=payment
profitL2[impType]+=payment
budgetL2[indBuyerL2]-=payment
## Update L2Ind (Same code as done before for the pure indicator case)
indBuyerL2Ind=0
tryToBidL2Ind=False
bidAmountL2Ind=0.0
if unifs[0] <= probBidL2Ind[impType]:
## For each campaign we check if there is any that has enough budget to bid and that
## also wants to do so.
bidUsingL2Ind=False
indInterested =\
(mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetL2Ind[listCampPerImp[impType]]) * \
(mat_x_by_ImpL2Ind[listCampPerImp[impType],impType]>0)
if np.sum(indInterested) >0:
bidUsingL2Ind= True
if bidUsingL2Ind:
## There is at least one campaign that wants to bid.
posInt=listCampPerImp[impType][indInterested]
## Conditional probability assuming that the method is going to bid.
## This conditional probability excludes all those campaigns
## that do not want to bid
condProbInterested=mat_x_by_ImpL2Ind[posInt, impType]
condProbInterested*=1.0/np.sum(condProbInterested)
auxPartSum=0.0
                ## Now we will choose on behalf of which campaign to bid.
numInterest = len(condProbInterested)
auxPosForindBuyerL2Ind = numInterest-1
z = 0
while z < numInterest:
auxPartSum += condProbInterested[z]
if auxPartSum >= unifs[1]:
## If we exceed unifs[1] go out of the loop
auxPosForindBuyerL2Ind=z
z+=numInterest
z += 1
indBuyerL2Ind=posInt[auxPosForindBuyerL2Ind]
tryToBidL2Ind=True
bidAmountL2Ind = mat_bid_by_ImpL2Ind[indBuyerL2Ind, impType]
if(tryToBidL2Ind):
cartBidsL2Ind[indBuyerL2Ind, impType]+=1
if bidAmountL2Ind>= mp_value:
## Impression Won.
cartWonL2Ind[indBuyerL2Ind, impType]+=1
costBidsL2Ind[impType]-=mp_value
profitL2Ind[impType]-=mp_value
# Now we need to check if the ad was clicked.
probOfClick=mat_ctrTest[indBuyerL2Ind, impType]
if (unifs[2]<=probOfClick):
## User clicked, increase revenue and charge the campaign.
cartClickedL2Ind[indBuyerL2Ind, impType]+=1
payment=mat_r_by_Imp[indBuyerL2Ind, impType]
revenueL2Ind[impType]+=payment
profitL2Ind[impType]+=payment
budgetL2Ind[indBuyerL2Ind]-=payment
### Now we update the Greedy Policy
        ## The greedy heuristic considers the campaigns that still have remaining
        ## budget and bids on behalf of the one with the highest r times ctr.
        ## The previous holds since Ipinyou assumes second price auctions.
indBuyerGr=-1
bidAmountGr=0.0
tryToBidGr=False
indInterested =\
mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetGr[listCampPerImp[impType]]
if np.sum(indInterested) > 0:
posInt=listCampPerImp[impType][indInterested]
indBuyerGr = posInt[np.argmax(mat_rctr_by_ImpTrain[posInt,impType])]
bidAmountGr = mat_rctr_by_ImpTrain[indBuyerGr, impType]
tryToBidGr = True
        ## If tryToBidGr == True, we will bid on behalf of campaign 'indBuyerGr'
        ## the amount 'bidAmountGr'
if (tryToBidGr):
            ## Save that we are bidding on behalf of 'indBuyerGr' for an impression of
            ## type 'impType'
cartBidsGr[indBuyerGr, impType]+=1
## We win the auction only if the value we are bidding is higher
## than the market price observed by Ipinyou.
if bidAmountGr>= mp_value:
## Impression Won.
cartWonGr[indBuyerGr, impType]+=1
costBidsGr[impType]-=mp_value
profitGr[impType]-=mp_value
# Now we need to check if the ad was clicked.
probOfClick=mat_ctrTest[indBuyerGr, impType]
if (unifs[2]<=probOfClick):
## User clicked, increase revenue and charge the campaign.
cartClickedGr[indBuyerGr, impType]+=1
payment=mat_r_by_Imp[indBuyerGr, impType]
revenueGr[impType]+=payment
profitGr[impType]+=payment
budgetGr[indBuyerGr]-=payment
return [budgetInd, cartBidsInd, cartWonInd, cartClickedInd, costBidsInd, revenueInd, \
profitInd, budgetL2, cartBidsL2, cartWonL2, cartClickedL2, costBidsL2, revenueL2, \
profitL2, budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, cartClickedL2Ind, costBidsL2Ind, \
revenueL2Ind, profitL2Ind, budgetGr, cartBidsGr, cartWonGr, cartClickedGr, costBidsGr,\
revenueGr, profitGr]
# ## Simulation for the profit maximization, profit maximization + L2 and Greedy
# Here we want to run the experiment changing the budget values to [(1.0/32.0), (1.0/8.0), .25, 0.5, 1.0]
# times the budgets used by Ipinyou. The iteration over the budget percentages is done in 'for perc in perVector_m:'
def ExperIndL2IndAndGreedy(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_mOrigTest, \
vector_sTest, vector_ctrTrain, vector_ctrTest, ImpInOrder, MPInOrder, impNames, \
alphasInd, num_itInd, alphasL2Ind, num_itL2Ind, p_grad_TypeInd, p_grad_TypeL2Ind, \
tau, init_lam, listCampPerImp, perVector_m=[(1.0/32.0), (1.0/8.0), .25, 0.5, 1.0], sim=100):
print('Starting ExperIndL2IndAndGreedy')
## The gradient type is needed as the different utility functions have different forms
## for p'(\cdot), and we want to use the right subgradient depending on the method we are using.
global p_grad_Type
vector_rctrTrain=np.multiply(vector_q, vector_ctrTrain)
vector_rctrTest=np.multiply(vector_q, vector_ctrTest)
dictToRetInd={}
dictToRetL2Ind={}
dictToRetGr={}
for perc in perVector_m:
## We first run the primal dual-subgradient method using the pure indicator utility function first
## and then the indicator plus l2 penalization.
print("Percentage: "+str(perc))
vector_m = vector_mOrigTest*perc
vector_s = vector_sTest
ext_s = vector_s[index_Imps]
dictToRetInd[perc]=[]
dictToRetL2Ind[perc]=[]
dictToRetGr[perc]=[]
p_grad_Type=p_grad_TypeInd
print('About to Run the SubgrAlgSavPrimDualObjInd using '+str(num_itInd)+' iterations')
initTime =time.time()
[dual_FnValues, primal_GivenMu, budget_used, dual_vars, dual_AvgLamFnValues, \
primal_AvgLamGivenMu, budget_LamAvgUse, dual_varsAvg]=SubgrAlgSavPrimDualObjInd(\
init_lam, num_itInd, alphasInd, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, \
PPFTable, numericBeta, index_sizeCamps, index_Imps, (num_itInd-1), p_grad_Type)
print("Took: "+str( time.time()-initTime)+' seconds')
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg)-1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsInd=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_Ind, beta_eval_Ind]=CalcRhoAndBetaVectors(bidsInd, num_edges, index_Imps, PPFTable, numericBeta)
xInd = CalculateLPGurobi(rho_eval_Ind, beta_eval_Ind, vector_rctrTrain, vector_m, \
ext_s, num_impressions, numCampaigns, num_edges, index_Imps, \
index_sizeCamps)
# xInd=OptimalX(beta_eval, rho_eval, ext_LamFinal, ext_s, vector_rctrTrain, num_edges, numCampaigns, \
# num_impressions, index_Imps, index_sizeCamps)
print('')
print('')
print('About to Run the SubgrAlgSavPrimDualObjFn_L2Ind using '+str(num_itL2Ind)+' iterations')
initTime =time.time()
p_grad_Type=p_grad_TypeL2Ind
tau=np.power(vector_m, -1)
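## tau is the inverse of each campaign's budget; it scales the per-campaign
## L2 penalization term used by the method below.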
[dual_FnValues, primal_GivenMu, budget_used, dual_vars, dual_AvgLamFnValues, \
primal_AvgLamGivenMu, budget_LamAvgUse, dual_varsAvg]=SubgrAlgSavPrimDualObjFn_L2Ind(\
init_lam, num_itL2Ind, alphasL2Ind, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, PPFTable, numericBeta, index_sizeCamps, \
index_Imps, (num_itL2Ind-1), p_grad_Type, tau, True)
print("Took: "+str( time.time()-initTime)+' seconds')
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg)-1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsL2Ind=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_L2Ind, beta_eval_L2Ind]=CalcRhoAndBetaVectors(bidsL2Ind, num_edges, index_Imps, PPFTable, numericBeta)
xL2Ind = CalculateQuadGurobi(rho_eval_L2Ind, beta_eval_L2Ind, vector_rctrTrain, vector_m, ext_s, \
num_impressions, numCampaigns, num_edges, index_Imps, index_sizeCamps, tau)
# xL2Ind=OptimalX(beta_eval, rho_eval, ext_LamFinal, ext_s, vector_rctrTrain, num_edges, numCampaigns, \
# num_impressions, index_Imps, index_sizeCamps)
## Now that we have run the primal-dual subgradient methods, we simulate
## how they would perform on the test log as explained in the paper. The number
## of simulations to run is equal to the parameter sim.
print('')
print('')
print('Finished running the Primal-Dual Algorithms')
print('Starting RunIndL2IndAndGreedy using '+str(perc)+' percentage of the Test budgets')
initTime =time.time()
for i in range(sim):
[budgetInd, cartBidsInd, cartWonInd, cartClickedInd, costBidsInd, \
revenueInd, profitInd, budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, \
cartClickedL2Ind, costBidsL2Ind, revenueL2Ind, profitL2Ind, budgetGr, \
cartBidsGr, cartWonGr, cartClickedGr, costBidsGr, revenueGr, profitGr]=\
RunIndL2IndAndGreedy(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_m, vector_ctrTrain, \
vector_rctrTrain, vector_ctrTest, vector_rctrTest, bidsInd, xInd, bidsL2Ind, \
xL2Ind, tau, ImpInOrder, MPInOrder, impNames,listCampPerImp)
dictToRetInd[perc].append([budgetInd, cartBidsInd, cartWonInd, \
cartClickedInd, costBidsInd, revenueInd, profitInd])
dictToRetL2Ind[perc].append([budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, \
cartClickedL2Ind, costBidsL2Ind, revenueL2Ind, profitL2Ind])
dictToRetGr[perc].append([budgetGr, cartBidsGr, cartWonGr, cartClickedGr, \
costBidsGr, revenueGr, profitGr])
# print("Profit Ind: "+str(np.sum(profitInd)))
# print("Profit Gr: "+str(np.sum(profitGr)))
# print("Ratio of Profits: "+str(np.sum(profitInd)/np.sum(profitGr)))
print("Took: "+str(time.time()-initTime)+' seconds')
return [dictToRetInd, dictToRetL2Ind, dictToRetGr]
def Exper_Ind_L2_L2Ind_Greedy(numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps,\
PPFTable, numericBeta, vector_q, vector_mOrigTest, vector_sTest, vector_ctrTrain, vector_ctrTest, \
ImpInOrder, MPInOrder, impNames, alphasInd, num_itInd, alphasL2, num_itL2, alphasL2Ind, num_itL2Ind,\
p_grad_TypeInd, p_grad_TypeL2, p_grad_TypeL2Ind, tau, init_lam, listCampPerImp,\
perVector_m=[(1.0/32.0), (1.0/8.0), .25, 0.5, 1.0], sim=100):
print('Starting Exper_Ind_L2_L2Ind_Greedy')
## The gradient type is needed as the different utility functions have different forms
## for p'(\cdot), and we want to use the right subgradient depending on the method we are using.
global p_grad_Type
vector_rctrTrain=np.multiply(vector_q, vector_ctrTrain)
vector_rctrTest=np.multiply(vector_q, vector_ctrTest)
dictToRetInd={}
dictToRetL2={}
dictToRetL2Ind={}
dictToRetGr={}
for perc in perVector_m:
## We first run the primal dual-subgradient method using the pure indicator utility function first
## and then the indicator plus l2 penalization.
print("Percentage: "+str(perc))
vector_m = vector_mOrigTest*perc
vector_s = vector_sTest
ext_s = vector_s[index_Imps]
dictToRetInd[perc] = []
dictToRetL2[perc] = []
dictToRetL2Ind[perc] = []
dictToRetGr[perc] = []
p_grad_Type=p_grad_TypeInd
print('About to Run the SubgrAlgSavPrimDualObjInd using '+str(num_itInd)+' iterations')
initTime =time.time()
[dual_FnValues, primal_GivenMu, budget_used, dual_vars, dual_AvgLamFnValues, \
primal_AvgLamGivenMu, budget_LamAvgUse, dual_varsAvg]=SubgrAlgSavPrimDualObjInd(\
init_lam, num_itInd, alphasInd, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, \
PPFTable, numericBeta, index_sizeCamps, index_Imps, (num_itInd-1), p_grad_Type)
print("Took: "+str( time.time()-initTime)+' seconds')
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg)-1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsInd=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_Ind, beta_eval_Ind]=CalcRhoAndBetaVectors(bidsInd, num_edges, index_Imps, PPFTable, numericBeta)
xInd = CalculateLPGurobi(rho_eval_Ind, beta_eval_Ind, vector_rctrTrain, vector_m, \
ext_s, num_impressions, numCampaigns, num_edges, index_Imps, \
index_sizeCamps)
# xInd=OptimalX(beta_eval, rho_eval, ext_LamFinal, ext_s, vector_rctrTrain, num_edges, numCampaigns, \
# num_impressions, index_Imps, index_sizeCamps)
print('')
print('')
print('About to Run the SubgrAlgSavPrimDualObjFn_L2Ind using '+str(num_itL2Ind)+' iterations without Indicator')
initTime =time.time()
p_grad_Type=p_grad_TypeL2
tau=np.power(vector_m, -1)
[dual_FnValues, primal_GivenMu, budget_used, dual_vars, dual_AvgLamFnValues, \
primal_AvgLamGivenMu, budget_LamAvgUse, dual_varsAvg]=SubgrAlgSavPrimDualObjFn_L2Ind(\
init_lam, num_itL2Ind, alphasL2Ind, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, PPFTable, numericBeta, index_sizeCamps, \
index_Imps, (num_itL2Ind-1), p_grad_Type, tau, False)
print("Took: "+str( time.time()-initTime)+' seconds')
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg)-1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsL2=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_L2, beta_eval_L2]=CalcRhoAndBetaVectors(bidsL2, num_edges, index_Imps, PPFTable, numericBeta)
xL2 = CalculateQuadGurobi(rho_eval_L2, beta_eval_L2, vector_rctrTrain, vector_m, ext_s, \
num_impressions, numCampaigns, num_edges, index_Imps, index_sizeCamps, tau, addIndicator = False)
print('')
print('')
print('About to Run the SubgrAlgSavPrimDualObjFn_L2Ind using '+str(num_itL2Ind)+' iterations')
initTime =time.time()
p_grad_Type=p_grad_TypeL2Ind
tau=np.power(vector_m, -1)
[dual_FnValues, primal_GivenMu, budget_used, dual_vars, dual_AvgLamFnValues, \
primal_AvgLamGivenMu, budget_LamAvgUse, dual_varsAvg]=SubgrAlgSavPrimDualObjFn_L2Ind(\
init_lam, num_itL2Ind, alphasL2Ind, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, PPFTable, numericBeta, index_sizeCamps, \
index_Imps, (num_itL2Ind-1), p_grad_Type, tau, True)
print("Took: "+str( time.time()-initTime)+' seconds')
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg)-1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsL2Ind=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_L2Ind, beta_eval_L2Ind]=CalcRhoAndBetaVectors(bidsL2Ind, num_edges, index_Imps, PPFTable, numericBeta)
xL2Ind = CalculateQuadGurobi(rho_eval_L2Ind, beta_eval_L2Ind, vector_rctrTrain, vector_m, ext_s, \
num_impressions, numCampaigns, num_edges, index_Imps, index_sizeCamps, tau, addIndicator = True)
# xL2Ind=OptimalX(beta_eval, rho_eval, ext_LamFinal, ext_s, vector_rctrTrain, num_edges, numCampaigns, \
# num_impressions, index_Imps, index_sizeCamps)
## Now that we have run the primal-dual subgradient methods, we simulate
## how they would perform on the test log as explained in the paper. The number
## of simulations to run is equal to the parameter sim.
print('')
print('')
print('Finished running the Primal-Dual Algorithms')
print('Starting RunInd_L2_L2Ind_Greedy using '+str(perc)+' percentage of the Test budgets')
initTime =time.time()
for i in range(sim):
[budgetInd, cartBidsInd, cartWonInd, cartClickedInd, costBidsInd, revenueInd, \
profitInd, budgetL2, cartBidsL2, cartWonL2, cartClickedL2, costBidsL2, revenueL2, \
profitL2, budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, cartClickedL2Ind, costBidsL2Ind, \
revenueL2Ind, profitL2Ind, budgetGr, cartBidsGr, cartWonGr, cartClickedGr, costBidsGr,\
revenueGr, profitGr] = RunInd_L2_L2Ind_Greedy(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_m, vector_ctrTrain, \
vector_rctrTrain, vector_ctrTest, vector_rctrTest, bidsInd, xInd, bidsL2, xL2, bidsL2Ind, \
xL2Ind, tau, ImpInOrder, MPInOrder, impNames,listCampPerImp)
dictToRetInd[perc].append([budgetInd, cartBidsInd, cartWonInd, \
cartClickedInd, costBidsInd, revenueInd, profitInd])
dictToRetL2[perc].append([budgetL2, cartBidsL2, cartWonL2, \
cartClickedL2, costBidsL2, revenueL2, profitL2])
dictToRetL2Ind[perc].append([budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, \
cartClickedL2Ind, costBidsL2Ind, revenueL2Ind, profitL2Ind])
dictToRetGr[perc].append([budgetGr, cartBidsGr, cartWonGr, cartClickedGr, \
costBidsGr, revenueGr, profitGr])
print("Took: "+str(time.time()-initTime)+' seconds')
return [dictToRetInd, dictToRetL2, dictToRetL2Ind, dictToRetGr]
def ExperIndL2IndAndGreedyOnePerc(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_mOrigTest, \
vector_sTest, vector_ctrTrain, vector_ctrTest, ImpInOrder, MPInOrder, impNames, \
alphasInd, num_itInd, alphasL2Ind, num_itL2Ind, p_grad_TypeInd, p_grad_TypeL2Ind, \
init_lam, listCampPerImp, perc, sim, seeds):
print('Starting ExperIndL2IndAndGreedyOnePerc')
## The gradient type is needed as the different utility functions have different forms
## for p'(\cdot), and we want to use the right subgradient depending on the method we are using.
np.random.seed(12345)
global p_grad_Type
vector_rctrTrain=np.multiply(vector_q, vector_ctrTrain)
vector_rctrTest=np.multiply(vector_q, vector_ctrTest)
# -*- coding: utf-8 -*-
import numpy as np
from arpym.statistics.cdf_sp import cdf_sp
def cop_marg_sep(x, p=None):
"""For details, see here.
Parameters
----------
x : array, shape (j_, n_) if n_>1 or (j_,) for n_=1
p : array, optional, shape (j_,)
Returns
-------
u : array, shape (j_, n_) if n_>1 or (j_,) for n_=1
x_grid : array, shape (j_, n_) if n_>1 or (j_,) for n_=1
cdf_x : array, shape (j_, n_) if n_>1 or (j_,) for n_=1
"""
if len(x.shape) == 1:
x = x.reshape(-1, 1)
j_, n_ = x.shape
if p is None:
p = np.ones(j_) / j_ # equal probabilities as default value
# Step 1: Sort scenarios
x_grid, ind_sort = np.sort(x, axis=0), np.argsort(x, axis=0)
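# ind_sort holds, for each column, the permutation that sorts the scenarios,
# so the probabilities p can be reordered consistently with x_grid.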
#!/usr/bin/env python
"""Easily convert RGB video data (e.g. .avi) to the TensorFlow tfrecords file format with the provided 3 color channels.
Allows to subsequently train a neural network in TensorFlow with the generated tfrecords.
Due to common hardware/GPU RAM limitations, this implementation allows to limit the number of frames per
video actually stored in the tfrecords. The code automatically chooses the frame step size such that there is
an equal separation distribution of the video images. Implementation supports Optical Flow
(currently OpenCV's calcOpticalFlowFarneback) as an additional 4th channel.
"""
from tensorflow.python.platform import gfile
from tensorflow.python.platform import flags
from tensorflow.python.platform import app
import cv2 as cv2
import numpy as np
import math
import os
import tensorflow as tf
import time
FLAGS = flags.FLAGS
flags.DEFINE_integer('n_videos_in_record', 10,
'Number of videos stored in one single tfrecord file')
flags.DEFINE_string('image_color_depth', "uint8",
'Color depth as string for the images stored in the tfrecord files. '
'Has to correspond to the source video color depth. '
'Specified as dtype (e.g. uint8 or uint16)')
flags.DEFINE_string('file_suffix', "*.mp4",
'defines the video file type, e.g. .mp4')
flags.DEFINE_string('source', './example/input', 'Directory with video files')
flags.DEFINE_string('destination', './example/output',
'Directory for storing tf records')
flags.DEFINE_boolean('optical_flow', True,
'Indicates whether optical flow shall be computed and added as fourth '
'channel.')
flags.DEFINE_integer('width_video', 1280, 'the width of the videos in pixels')
flags.DEFINE_integer('height_video', 720, 'the height of the videos in pixels')
flags.DEFINE_integer('n_frames_per_video', 5,
'specifies the number of frames to be taken from each video')
flags.DEFINE_integer('n_channels', 4,
'specifies the number of channels the videos have')
flags.DEFINE_string('video_filenames', None,
'specifies the video file names as a list in the case the video paths shall not be determined by the '
'script')
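# Helper wrappers that box raw values into tf.train.Feature protos, as needed
# when assembling the tf.train.Example written to each tfrecord.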
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def get_chunks(l, n):
"""Yield successive n-sized chunks from l.
Used to create n sublists from a list l"""
for i in range(0, len(l), n):
yield l[i:i + n]
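# e.g. list(get_chunks([0, 1, 2, 3, 4], 2)) -> [[0, 1], [2, 3], [4]]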
def get_video_capture_and_frame_count(path):
assert os.path.isfile(
path), "Couldn't find video file:" + path + ". Skipping video."
cap = None
if path:
cap = cv2.VideoCapture(path)
assert cap is not None, "Couldn't load video capture:" + path + ". Skipping video."
# compute meta data of video
if hasattr(cv2, 'cv'):
frame_count = int(cap.get(cv2.cv.CAP_PROP_FRAME_COUNT))
else:
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
return cap, frame_count
def get_next_frame(cap):
ret, frame = cap.read()
if not ret:
return None
return np.asarray(frame)
def compute_dense_optical_flow(prev_image, current_image):
old_shape = current_image.shape
prev_image_gray = cv2.cvtColor(prev_image, cv2.COLOR_BGR2GRAY)
current_image_gray = cv2.cvtColor(current_image, cv2.COLOR_BGR2GRAY)
assert current_image.shape == old_shape
hsv = np.zeros_like(prev_image)
hsv[..., 1] = 255
flow = None
flow = cv2.calcOpticalFlowFarneback(prev=prev_image_gray,
next=current_image_gray, flow=flow,
pyr_scale=0.8, levels=15, winsize=5,
iterations=10, poly_n=5, poly_sigma=0,
flags=10)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
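# The flow visualization returned above is a 3-channel (BGR) image. A sketch
# (an assumption mirroring the 4-channel layout from the module docstring) of
# how it can be collapsed to the single extra channel:
#   flow = compute_dense_optical_flow(prev_frame, frame)
#   flow_gray = cv2.cvtColor(flow, cv2.COLOR_BGR2GRAY)
#   frame_4ch = np.dstack((frame, flow_gray))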
def convert_videos_to_tfrecord(source_path, destination_path,
n_videos_in_record=10, n_frames_per_video='all',
file_suffix="*.mp4", dense_optical_flow=True,
width=1280, height=720,
color_depth="uint8", video_filenames=None):
"""Starts the process of converting video files to tfrecord files. If
dense_optical_flow is set to True, the number of video channels in the
tfrecords will automatically be 4, i.e. the pipeline assumes 3 (RGB) channels
in the videos. This pipeline does not (yet) support a different number of
channels.
Args:
source_path: directory where video videos are stored
destination_path: directory where tfrecords should be stored
n_videos_in_record: Number of videos stored in one single tfrecord file
n_frames_per_video: integer value or the string 'all'. Specifies the number of
frames extracted from each video. If set to 'all', all frames are extracted from
the videos and stored in the tfrecord. If the number is lower than the number of
available frames, the subset of extracted frames will be selected equally
spaced over the entire video playtime.
file_suffix: defines the video file type, e.g. *.mp4
dense_optical_flow: boolean flag that controls if optical flow should be
used and added to tfrecords
width: the width of the videos in pixels
height: the height of the videos in pixels
color_depth: Color depth as string for the images stored in the tfrecord
files. Has to correspond to the source video color depth. Specified as
dtype (e.g. uint8 or uint16)
video_filenames: an optional list of full paths to the videos. If
provided, the source path will be ignored.
"""
assert isinstance(n_frames_per_video, (int, str))
if type(n_frames_per_video) is str:
assert n_frames_per_video == "all"
if dense_optical_flow:
n_channels = 4
else:
n_channels = 3
if video_filenames is not None:
filenames = video_filenames
else:
filenames = gfile.Glob(os.path.join(source_path, file_suffix))
if not filenames:
raise RuntimeError('No data files found.')
print('Total videos found: ' + str(len(filenames)))
filenames_split = list(get_chunks(filenames, n_videos_in_record))
for i, batch in enumerate(filenames_split):
data = convert_video_to_numpy(filenames=batch, width=width, height=height,
n_frames_per_video=n_frames_per_video,
n_channels=n_channels,
dense_optical_flow=dense_optical_flow)
if n_videos_in_record > len(filenames):
total_batch_number = 1
else:
total_batch_number = int(math.ceil(len(filenames) / n_videos_in_record))
print('Batch ' + str(i + 1) + '/' + str(total_batch_number) + " completed")
assert data.size != 0, 'something went wrong during video to numpy conversion'
save_numpy_to_tfrecords(data, destination_path, 'batch_',
n_videos_in_record, i + 1, total_batch_number,
color_depth=color_depth)
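# Example invocation (a sketch reusing the flag defaults defined above):
#   convert_videos_to_tfrecord(source_path='./example/input',
#                              destination_path='./example/output',
#                              n_videos_in_record=10, n_frames_per_video=5,
#                              file_suffix='*.mp4', dense_optical_flow=True,
#                              width=1280, height=720, color_depth='uint8')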
def save_numpy_to_tfrecords(data, destination_path, name, fragmentSize,
current_batch_number, total_batch_number,
color_depth):
"""Converts an entire dataset into x tfrecords where x=videos/fragmentSize.
Args:
data: ndarray(uint32) of shape (v,i,h,w,c) with v=number of videos,
i=number of images, c=number of image channels, h=image height, w=image
width
name: filename; data samples type (train|valid|test)
fragmentSize: specifies how many videos are stored in one tfrecords file
current_batch_number: indicates the current batch index (function call within loop)
total_batch_number: indicates the total number of batches
"""
num_videos = data.shape[0]
num_images = data.shape[1]
num_channels = data.shape[4]
height = data.shape[2]
width = data.shape[3]
writer = None
feature = {}
for video_count in range(num_videos):
if video_count % fragmentSize == 0:
if writer is not None:
writer.close()
filename = os.path.join(destination_path,
name + str(current_batch_number) + '_of_' + str(
total_batch_number) + '.tfrecords')
print('Writing', filename)
writer = tf.python_io.TFRecordWriter(filename)
for image_count in range(num_images):
path = 'blob' + '/' + str(image_count)
image = data[video_count, image_count, :, :, :]
image = image.astype(color_depth)
image_raw = image.tostring()
feature[path] = _bytes_feature(image_raw)
feature['height'] = _int64_feature(height)
feature['width'] = _int64_feature(width)
feature['depth'] = _int64_feature(num_channels)
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
if writer is not None:
writer.close()
def repeat_image_retrieval(cap, file_path, video, take_all_frames, steps, frame,
prev_frame_none, frames_counter):
stop = False
if (frame is not None and prev_frame_none) or steps <= 0:
stop = True
return stop, cap, video, steps, prev_frame_none, frames_counter
if not take_all_frames:
# repeat with smaller step size
steps -= 1
prev_frame_none = True
print("reducing step size due to error for video: ", file_path)
frames_counter = 0
cap.release()
cap, _ = get_video_capture_and_frame_count(file_path)
# wait for image retrieval to be ready
time.sleep(2)
return stop, cap, video, steps, prev_frame_none, frames_counter
def video_file_to_ndarray(i, file_path, n_frames_per_video, height, width,
n_channels, num_real_image_channel,
dense_optical_flow, number_of_videos):
cap, frame_count = get_video_capture_and_frame_count(file_path)
take_all_frames = False
# if not all frames are to be used, we have to skip some -> set step size accordingly
if n_frames_per_video == 'all':
take_all_frames = True
video = np.zeros((frame_count, height, width, n_channels), dtype=np.uint32)
# -*- coding: utf-8 -*-
"""
Based on Argo's density inversion test. Test 14 @ Argo QC 2.9.1
"""
import logging
import numpy as np
from numpy import ma
from .qctests import QCCheck
module_logger = logging.getLogger(__name__)
try:
import gsw
GSW_AVAILABLE = True
except ImportError:
module_logger.debug("Missing package GSW, used to estimate density when needed.")
GSW_AVAILABLE = False
def densitystep(SA, t, p, auto_rotate=False):
"""Estimates the potential density step of successive mesurements
Expects the data to be recorded along the time, i.e. first measurement
was recorded first. This makes difference since the first measurement
has no reference to define the delta change.
This is relevant for the type of instrument. For instance: XBTs are
always measured surface to bottom, CTDs are expected the same, but
Spray underwater gliders measure bottom to surface.
"""
assert np.shape(t) == np.shape(p)
assert np.shape(t) == np.shape(SA)
assert np.ndim(t) == 1
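# A minimal sketch (an assumption, not the original implementation) of one way
# to finish the computation using the optional gsw package imported above:
#   rho = gsw.pot_rho_t_exact(SA, t, p, 0)
#   ds = ma.concatenate([ma.masked_all(1), ma.diff(rho)])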
from __future__ import print_function, division
import os, re, sys
import logging
from .config import on_rtd
if not on_rtd:
import numpy as np
import pandas as pd
from configobj import ConfigObj
from asciitree import LeftAligned, Traversal
from asciitree.drawing import BoxStyle, BOX_DOUBLE, BOX_BLANK
from collections import OrderedDict
from itertools import chain, count
try:
from itertools import imap, izip
except ImportError: # Python 3
imap = map
izip = zip
xrange = range
else:
class Traversal(object):
pass
class LeftAligned(object):
pass
from .isochrone import get_ichrone
from .utils import addmags, distance
class NodeTraversal(Traversal):
"""
Custom subclass to traverse tree for ascii printing
"""
def __init__(self, pars=None, **kwargs):
self.pars = pars
super(NodeTraversal,self).__init__(**kwargs)
def get_children(self, node):
return node.children
def get_root(self, node):
    ## For ascii printing, the node handed in is itself treated as the root.
    return node
def get_text(self, node):
text = node.label
if self.pars is not None:
if hasattr(node, 'model_mag'):
text += '; model={:.2f} ({})'.format(node.model_mag(self.pars),
node.lnlike(self.pars))
if type(node)==ModelNode:
root = node.get_root()
if hasattr(root, 'spectroscopy'):
if node.label in root.spectroscopy:
for k,v in root.spectroscopy[node.label].items():
text += ', {}={}'.format(k,v)
modval = node.evaluate(self.pars[node.label], k)
lnl = -0.5*(modval - v[0])**2/v[1]**2
text += '; model={} ({})'.format(modval, lnl)
if node.label in root.limits:
for k,v in root.limits[node.label].items():
text += ', {} limits={}'.format(k,v)
text += ': {}'.format(self.pars[node.label])
else:
if type(node)==ModelNode:
root = node.get_root()
if hasattr(root, 'spectroscopy'):
if node.label in root.spectroscopy:
for k,v in root.spectroscopy[node.label].items():
text += ', {}={}'.format(k,v)
if node.label in root.limits:
for k,v in root.limits[node.label].items():
text += ', {} limits={}'.format(k,v)
#root = node.get_root()
#if hasattr(root,'spectroscopy'):
# if node.label in root.spectroscopy:
# for k,v in root.spectroscopy[node.label].items():
# model = node.evaluate(self.pars[node.label], k)
# text += '\n {}={} (model={})'.format(k,v,model)
return text
class MyLeftAligned(LeftAligned):
"""For custom ascii tree printing
"""
pars = None
def __init__(self, pars=None, **kwargs):
self.pars = pars
self.traverse = NodeTraversal(pars)
super(MyLeftAligned,self).__init__(**kwargs)
class Node(object):
def __init__(self, label):
self.label = label
self.parent = None
self.children = []
self._leaves = None
def __iter__(self):
"""
Iterate through tree, leaves first
following http://stackoverflow.com/questions/6914803/python-iterator-through-tree-with-list-of-children
"""
for node in chain(*imap(iter, self.children)):
yield node
yield self
def __getitem__(self, ind):
for n,i in izip(self, count()):
if i==ind:
return n
@property
def is_root(self):
return self.parent is None
def get_root(self):
if self.is_root:
return self
else:
return self.parent.get_root()
def get_ancestors(self):
if self.parent.is_root:
return []
else:
return [self.parent] + self.parent.get_ancestors()
def print_ascii(self, fout=None, pars=None):
box_tr = MyLeftAligned(pars,draw=BoxStyle(gfx=BOX_DOUBLE, horiz_len=1))
if fout is None:
print(box_tr(self))
else:
fout.write(box_tr(self))
@property
def is_leaf(self):
return len(self.children)==0 and not self.is_root
def _clear_leaves(self):
self._leaves = None
def _clear_all_leaves(self):
if not self.is_root:
self.parent._clear_all_leaves()
self._clear_leaves()
def add_child(self, node):
node.parent = self
self.children.append(node)
self._clear_all_leaves()
def remove_children(self):
self.children = []
self._clear_all_leaves()
def remove_child(self, label):
"""
Removes node by label
"""
ind = None
for i,c in enumerate(self.children):
if c.label==label:
ind = i
if ind is None:
logging.warning('No child labeled {}.'.format(label))
return
self.children.pop(ind)
self._clear_all_leaves()
def attach_to_parent(self, node):
# detach from current parent, if necessary
if self.parent is not None:
self.parent.remove_child(self.label)
node.children += [self]
self.parent = node
self._clear_all_leaves()
@property
def leaves(self):
if self._leaves is None:
self._leaves = self._get_leaves()
return self._leaves
def _get_leaves(self):
if self.is_leaf:
return [self]
else:
leaves = []
for c in self.children:
leaves += c._get_leaves()
return leaves
def select_leaves(self, name):
"""Returns all leaves under all nodes matching name
"""
if self.is_leaf:
return [self] if re.search(name, self.label) else []
else:
leaves = []
if re.search(name, self.label):
for c in self.children:
leaves += c._get_leaves() #all leaves
else:
for c in self.children:
leaves += c.select_leaves(name) #only matching ones
return leaves
@property
def leaf_labels(self):
return [l.label for l in self.leaves]
def get_leaf(self, label):
for l in self.leaves:
if label==l.label:
return l
def get_obs_nodes(self):
return [l for l in self if isinstance(l, ObsNode)]
@property
def obs_leaf_nodes(self):
return self.get_obs_leaves()
def get_obs_leaves(self):
"""Returns the last obs nodes that are leaves
"""
obs_leaves = []
for n in self:
if n.is_leaf:
if isinstance(n, ModelNode):
l = n.parent
else:
l = n
if l not in obs_leaves:
obs_leaves.append(l)
return obs_leaves
def get_model_nodes(self):
return [l for l in self._get_leaves() if isinstance(l, ModelNode)]
@property
def N_model_nodes(self):
return len(self.get_model_nodes())
def print_tree(self):
print(self.label)
def __str__(self):
return self.label
def __repr__(self):
if self.is_leaf:
s = "<{} '{}', parent='{}'>".format(self.__class__,
self.label,
self.parent)
else:
child_labels = [str(c) for c in self.children]
s = "<{} '{}', parent='{}', children={}>".format(self.__class__,
self.label,
self.parent,
child_labels)
return s
class ObsNode(Node):
def __init__(self, observation, source, ref_node=None):
self.observation = observation
self.source = source
self.reference = ref_node
self.children = []
self.parent = None
self._leaves = None
#indices of underlying models, defining physical systems
self._inds = None
self._n_params = None
self._Nstars = None
#for model_mag caching
self._cache_key = None
self._cache_val = None
@property
def instrument(self):
return self.observation.name
@property
def band(self):
return self.observation.band
@property
def value(self):
return (self.source.mag, self.source.e_mag)
@property
def resolution(self):
return self.observation.resolution
@property
def relative(self):
return self.source.relative
@property
def separation(self):
return self.source.separation
@property
def pa(self):
return self.source.pa
@property
def value_str(self):
return '({:.2f}, {:.2f})'.format(*self.value)
def distance(self, other):
"""Coordinate distance from another ObsNode
"""
return distance((self.separation, self.pa), (other.separation, other.pa))
def _in_same_observation(self, other):
return self.instrument==other.instrument and self.band==other.band
@property
def n_params(self):
if self._n_params is None:
self._n_params = 5 * len(self.leaves)
return self._n_params
def _get_inds(self):
inds = [n.index for n in self.leaves]
inds = sorted(list(set(inds)))
return inds
def _clear_leaves(self):
self._leaves = None
self._inds = None
self._n_params = None
self._Nstars = None
@property
def Nstars(self):
"""
dictionary of number of stars per system
"""
if self._Nstars is None:
N = {}
for n in self.get_model_nodes():
if n.index not in N:
N[n.index] = 1
else:
N[n.index] += 1
self._Nstars = N
return self._Nstars
@property
def systems(self):
lst = sorted(self.Nstars.keys())
return lst
@property
def inds(self):
if self._inds is None:
self._inds = self._get_inds()
return self._inds
@property
def label(self):
if self.source.relative:
band_str = 'delta-{}'.format(self.band)
else:
band_str = self.band
return '{} {}={} @({:.2f}, {:.0f} [{:.2f}])'.format(self.instrument,
band_str,
self.value_str, self.separation, self.pa,
self.resolution)
@property
def obsname(self):
return '{}-{}'.format(self.instrument, self.band)
def get_system(self, ind):
system = []
for l in self.get_root().leaves:
try:
if l.index==ind:
system.append(l)
except AttributeError:
pass
return system
def add_model(self, ic, N=1, index=0):
"""
Should only be able to do this to a leaf node.
Either N and index both integers OR index is
list of length=N
"""
if type(index) in [list,tuple]:
if len(index) != N:
raise ValueError('If a list, index must be of length N.')
else:
index = [index]*N
for idx in index:
existing = self.get_system(idx)
tag = len(existing)
self.add_child(ModelNode(ic, index=idx, tag=tag))
def model_mag(self, pardict, use_cache=True):
"""
pardict is a dictionary of parameters for all leaves
gets converted back to traditional parameter vector
"""
if pardict == self._cache_key and use_cache:
#print('{}: using cached'.format(self))
return self._cache_val
#print('{}: calculating'.format(self))
self._cache_key = pardict
# Generate appropriate parameter vector from dictionary
p = []
for l in self.leaf_labels:
p.extend(pardict[l])
assert len(p) == self.n_params
tot = np.inf
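# Start from zero flux (infinite magnitude) and add each star's flux.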
#print('Building {} mag for {}:'.format(self.band, self))
for i,m in enumerate(self.leaves):
mag = m.evaluate(p[i*5:(i+1)*5], self.band)
# logging.debug('{}: mag={}'.format(self,mag))
#print('{}: {}({}) = {}'.format(m,self.band,p[i*5:(i+1)*5],mag))
tot = addmags(tot, mag)
self._cache_val = tot
return tot
def lnlike(self, pardict, use_cache=True):
"""
returns log-likelihood of this observation
pardict is a dictionary of parameters for all leaves
gets converted back to traditional parameter vector
"""
mag, dmag = self.value
if np.isnan(dmag):
return 0
if self.relative:
# If this *is* the reference, just return
if self.reference is None:
return 0
mod = (self.model_mag(pardict, use_cache=use_cache) -
self.reference.model_mag(pardict, use_cache=use_cache))
mag -= self.reference.value[0]
else:
mod = self.model_mag(pardict, use_cache=use_cache)
lnl = -0.5*(mag - mod)**2 / dmag**2
# logging.debug('{} {}: mag={}, mod={}, lnlike={}'.format(self.instrument,
# self.band,
# mag,mod,lnl))
return lnl
class DummyObsNode(ObsNode):
def __init__(self, *args, **kwargs):
self.observation = None
self.source = None
self.reference = None
self.children = []
self.parent = None
self._leaves = None
#indices of underlying models, defining physical systems
self._inds = None
self._n_params = None
self._Nstars = None
#for model_mag caching
self._cache_key = None
self._cache_val = None
@property
def label(self):
return '[dummy]'
@property
def value(self):
return None, None
def lnlike(self, *args, **kwargs):
return 0
class ModelNode(Node):
"""
These are always leaves; leaves are always these.
Index keeps track of which physical system node is in.
"""
def __init__(self, ic, index=0, tag=0):
self._ic = ic
self.index = index
self.tag = tag
self.children = []
self.parent = None
self._leaves = None
@property
def label(self):
return '{}_{}'.format(self.index, self.tag)
@property
def ic(self):
if type(self._ic)==type:
self._ic = self._ic()
return self._ic
def get_obs_ancestors(self):
nodes = self.get_ancestors()
return [n for n in nodes if isinstance(n, ObsNode)]
@property
def contributing_observations(self):
"""The instrument-band for all the observations feeding into this model node
"""
return [n.obsname for n in self.get_obs_ancestors()]
def evaluate(self, p, prop):
if prop in self.ic.bands:
return self.evaluate_mag(p, prop)
elif prop=='mass':
return p[0]
elif prop=='age':
return p[1]
elif prop=='feh':
return p[2]
elif prop in ['Teff','logg','radius']:
return getattr(self.ic, prop)(*p[:3])
else:
raise ValueError('property {} cannot be evaluated by Isochrone.'.format(prop))
def evaluate_mag(self, p, band):
return self.ic.mag[band](*p)
def lnlike(self, *args, **kwargs):
return 0
class Source(object):
def __init__(self, mag, e_mag, separation=0., pa=0.,
relative=False, is_reference=False):
self.mag = float(mag)
self.e_mag = float(e_mag)
self.separation = float(separation)
self.pa = float(pa)
self.relative = bool(relative)
self.is_reference = bool(is_reference)
def __str__(self):
return '({}, {}) @({}, {})'.format(self.mag, self.e_mag,
self.separation, self.pa)
def __repr__(self):
return self.__str__()
class Star(object):
"""Theoretical counterpart of Source.
"""
def __init__(self, pars, separation, pa):
self.pars = pars
self.separation = separation
self.pa = pa
def distance(self, other):
return distance((self.separation, self.pa),
(other.separation, other.pa))
class Observation(object):
"""
Contains relevant information about imaging observation
name: identifying string (typically the instrument)
band: photometric bandpass
resolution: *approximate* angular resolution of instrument.
used for source matching between observations
sources: list of Source objects
"""
def __init__(self, name, band, resolution, sources=None,
relative=False):
self.name = name
self.band = band
self.resolution = resolution
if sources is not None:
    # np.all over a generator is always truthy; the builtin all is needed here.
    if not all(type(s)==Source for s in sources):
        raise ValueError('Source list must be all Source objects.')
self.sources = []
if sources is None:
sources = []
for s in sources:
self.add_source(s)
self.relative = relative
self._set_reference()
def observe(self, stars, unc, ic=None):
"""Creates and adds appropriate synthetic Source objects for list of stars (max 2 for now)
"""
if ic is None:
ic = get_ichrone('mist')
if len(stars) > 2:
raise NotImplementedError('No support yet for > 2 synthetic stars')
mags = [ic(*s.pars)['{}_mag'.format(self.band)].values[0] for s in stars]
d = stars[0].distance(stars[1])
if d < self.resolution:
mag = addmags(*mags) + unc*np.random.randn()
sources = [Source(mag, unc, stars[0].separation, stars[0].pa,
relative=self.relative)]
else:
mags = np.array([m + unc*np.random.randn() for m in mags])
if self.relative:
mags -= mags.min()
sources = [Source(m, unc, s.separation, s.pa, relative=self.relative)
for m,s in zip(mags, stars)]
for s in sources:
self.add_source(s)
self._set_reference()
def add_source(self, source):
"""
Adds source to observation, keeping sorted order (in separation)
"""
if not type(source)==Source:
raise TypeError('Can only add Source object.')
if len(self.sources)==0:
self.sources.append(source)
else:
ind = 0
for s in self.sources:
# Keep sorted order of separation
if source.separation < s.separation:
break
ind += 1
self.sources.insert(ind, source)
#self._set_reference()
@property
def brightest(self):
mag0 = np.inf
s0 = None
for s in self.sources:
if s.mag < mag0:
mag0 = s.mag
s0 = s
return s0
def _set_reference(self):
"""If relative, make sure reference node is set to brightest.
"""
if len(self.sources) > 0:
self.brightest.is_reference = True
def __str__(self):
return '{}-{}'.format(self.name, self.band)
def __repr__(self):
return str(self)
class ObservationTree(Node):
"""Builds a tree of Nodes from a list of Observation objects
Organizes Observations from smallest to largest resolution,
and at each stage attaches each source to the most probable
match from the previous Observation. Admittedly somewhat hack-y,
but should *usually* do the right thing. Check out `obs.print_ascii()`
to visualize what this has done.
"""
spec_props = ['Teff', 'logg', 'feh']
def __init__(self, observations=None, name=None):
if observations is None:
observations = []
if name is None:
self.label = 'root'
else:
self.label = name
self.parent = None
self._observations = []
self._build_tree()
[self.add_observation(obs) for obs in observations]
self._N = None
self._index = None
# Spectroscopic properties
self.spectroscopy = {}
# Limits (such as minimum on logg)
self.limits = {}
# Parallax measurements
self.parallax = {}
# This will be calculated and set at first access
self._Nstars = None
#likelihood cache
self._cache_key = None
self._cache_val = None
@property
def name(self):
return self.label
def _clear_cache(self):
self._cache_key = None
self._cache_val = None
@classmethod
def from_df(cls, df, **kwargs):
"""
DataFrame must have the right columns.
these are: name, band, resolution, mag, e_mag, separation, pa
"""
tree = cls(**kwargs)
for (n,b), g in df.groupby(['name','band']):
#g.sort('separation', inplace=True) #ensures that the first is reference
sources = [Source(**s[['mag','e_mag','separation','pa','relative']])
for _,s in g.iterrows()]
obs = Observation(n, b, g.resolution.mean(),
sources=sources, relative=g.relative.any())
tree.add_observation(obs)
# For all relative mags, set reference to be brightest
return tree
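# e.g. (hypothetical values):
#   df = pd.DataFrame({'name': ['inst'], 'band': ['K'], 'resolution': [0.05],
#                      'mag': [10.0], 'e_mag': [0.05], 'separation': [0.0],
#                      'pa': [0.0], 'relative': [False]})
#   tree = ObservationTree.from_df(df)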
@classmethod
def from_ini(cls, filename):
config = ConfigObj(filename)
def to_df(self):
"""
Returns DataFrame with photometry from observations organized.
This DataFrame should be able to be read back in to
reconstruct the observation.
"""
df = pd.DataFrame()
name = []
band = []
resolution = []
mag = []
e_mag = []
separation = []
pa = []
relative = []
for o in self._observations:
for s in o.sources:
name.append(o.name)
band.append(o.band)
resolution.append(o.resolution)
mag.append(s.mag)
e_mag.append(s.e_mag)
separation.append(s.separation)
pa.append(s.pa)
relative.append(s.relative)
return pd.DataFrame({'name':name,'band':band,'resolution':resolution,
'mag':mag,'e_mag':e_mag,'separation':separation,
'pa':pa,'relative':relative})
def save_hdf(self, filename, path='', overwrite=False, append=False):
"""
Writes all info necessary to recreate object to HDF file
Saves table of photometry in DataFrame
Saves model specification, spectroscopy, parallax to attrs
"""
if os.path.exists(filename):
store = pd.HDFStore(filename)
if path in store:
store.close()
if overwrite:
os.remove(filename)
elif not append:
raise IOError('{} in {} exists. Set either overwrite or append option.'.format(path,filename))
else:
store.close()
df = self.to_df()
df.to_hdf(filename, path+'/df')
with pd.HDFStore(filename) as store:
# store = pd.HDFStore(filename)
attrs = store.get_storer(path+'/df').attrs
attrs.spectroscopy = self.spectroscopy
attrs.parallax = self.parallax
attrs.N = self._N
attrs.index = self._index
store.close()
@classmethod
def load_hdf(cls, filename, path='', ic=None):
"""
Loads stored ObservationTree from file.
You can provide the isochrone to use; or it will default to MIST
TODO: saving and loading must be fixed! save ic type, bands, etc.
"""
store = pd.HDFStore(filename)
try:
samples = store[path+'/df']
attrs = store.get_storer(path+'/df').attrs
except:
store.close()
raise
df = store[path+'/df']
new = cls.from_df(df)
if ic is None:
ic = get_ichrone('mist')
new.define_models(ic, N=attrs.N, index=attrs.index)
new.spectroscopy = attrs.spectroscopy
new.parallax = attrs.parallax
store.close()
return new
def add_observation(self, obs):
"""Adds an observation to observation list, keeping proper order
"""
if len(self._observations)==0:
self._observations.append(obs)
else:
res = obs.resolution
ind = 0
for o in self._observations:
if res > o.resolution:
break
ind += 1
self._observations.insert(ind, obs)
self._build_tree()
self._clear_cache()
def add_spectroscopy(self, label='0_0', **props):
"""
Adds spectroscopic measurement to particular star(s) (corresponding to individual model node)
Default 0_0 should be primary star
legal inputs are 'Teff', 'logg', 'feh', and in form (val, err)
"""
if label not in self.leaf_labels:
raise ValueError('No model node named {} (must be in {}). Maybe define models first?'.format(label, self.leaf_labels))
for k,v in props.items():
if k not in self.spec_props:
raise ValueError('Illegal property {} (only {} allowed).'.format(k, self.spec_props))
if len(v) != 2:
raise ValueError('Must provide (value, uncertainty) for {}.'.format(k))
if label not in self.spectroscopy:
self.spectroscopy[label] = {}
for k,v in props.items():
self.spectroscopy[label][k] = v
self._clear_cache()
def add_limit(self, label='0_0', **props):
"""Define limits to spectroscopic property of particular stars.
Usually will be used for 'logg', but 'Teff' and 'feh' will also work.
In form (min, max): e.g., t.add_limit(logg=(3.0,None))
None will be converted to (-)np.inf
"""
if label not in self.leaf_labels:
raise ValueError('No model node named {} (must be in {}). Maybe define models first?'.format(label, self.leaf_labels))
for k,v in props.items():
if k not in self.spec_props:
raise ValueError('Illegal property {} (only {} allowed).'.format(k, self.spec_props))
if len(v) != 2:
raise ValueError('Must provide (min, max) for {}. (`None` is allowed value)'.format(k))
if label not in self.limits:
self.limits[label] = {}
for k,v in props.items():
vmin, vmax = v
if vmin is None:
vmin = -np.inf
if vmax is None:
vmax = np.inf
self.limits[label][k] = (vmin, vmax)
self._clear_cache()
def add_parallax(self, plax, system=0):
if len(plax)!=2:
raise ValueError('Must enter (value,uncertainty).')
if system not in self.systems:
raise ValueError('{} not in systems ({}).'.format(system,self.systems))
self.parallax[system] = plax
self._clear_cache()
def define_models(self, ic, leaves=None, N=1, index=0):
"""
N, index are either integers or lists of integers.
N : number of model stars per observed star
index : index of physical association
leaves: either a list of leaves, or a pattern by which
the leaves are selected (via `select_leaves`)
If these are lists, then they are defined individually for
each leaf.
If `index` is a list, then each entry must be either
an integer or a list of length `N` (where `N` is the corresponding
entry in the `N` list.)
This bugs up if you call it multiple times. If you want
to re-do a call to this function, please re-define the tree.
"""
self.clear_models()
if leaves is None:
leaves = self._get_leaves()
elif type(leaves)==type(''):
leaves = self.select_leaves(leaves)
# Sort leaves by distance, to ensure system 0 will be assigned
# to the main reference star.
if np.isscalar(N):
N = (np.ones(len(leaves))*N)
#if np.size(index) > 1:
# index = [index]
N = np.array(N).astype(int)
if np.isscalar(index):
index = (np.ones_like(N)*index)
index = np.array(index).astype(int)
# Add the appropriate number of model nodes to each
# star in the highest-resoluion image
for s,n,i in zip(leaves, N, index):
# Remove any previous model nodes (should do some checks here?)
s.remove_children()
s.add_model(ic, n, i)
# For each system, make sure tag _0 is the brightest.
self._fix_labels()
self._N = N
self._index = index
self._clear_all_leaves()
def _fix_labels(self):
"""For each system, make sure tag _0 is the brightest, and make sure
system 0 contains the brightest star in the highest-resolution image
"""
for s in self.systems:
mag0 = np.inf
n0 = None
for n in self.get_system(s):
if isinstance(n.parent, DummyObsNode):
continue
mag, _ = n.parent.value
if mag < mag0:
mag0 = mag
n0 = n
# If brightest is not tag _0, then switch them.
if n0 is not None and n0.tag != 0:
n_other = self.get_leaf('{}_{}'.format(s,0))
n_other.tag = n0.tag
n0.tag = 0
def get_system(self, ind):
system = []
for l in self.leaves:
try:
if l.index==ind:
system.append(l)
except AttributeError:
pass
return system
@property
def observations(self):
return self._observations
def select_observations(self, name):
"""Returns nodes whose instrument-band matches 'name'
"""
return [n for n in self.get_obs_nodes() if n.obsname==name]
def clear_models(self):
for n in self:
if isinstance(n, ModelNode):
n.parent.remove_child(n.label)
self._clear_all_leaves()
def trim(self):
"""
Trims leaves from tree that are not observed at highest-resolution level
This is a bit hacky-- what it does is
"""
# Only allow leaves to stay on list (highest-resolution) level
return
for l in self._levels[-2::-1]:
for n in l:
if n.is_leaf:
n.parent.remove_child(n.label)
self._clear_all_leaves() #clears cached list of leaves
def p2pardict(self, p):
"""
Given leaf labels, turns parameter vector into pardict
"""
d = {}
N = self.Nstars
i = 0
for s in self.systems:
age, feh, dist, AV = p[i+N[s]:i+N[s]+4]
for j in xrange(N[s]):
l = '{}_{}'.format(s,j)
mass = p[i+j]
d[l] = [mass, age, feh, dist, AV]
i += N[s] + 4
return d
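# e.g. for a single system (index 0) with two stars, the flat parameter
# vector is [mass_0_0, mass_0_1, age_0, feh_0, distance_0, AV_0].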
@property
def param_description(self):
N = self.Nstars
pars = []
for s in self.systems:
for j in xrange(N[s]):
pars.append('mass_{}_{}'.format(s,j))
for p in ['age', 'feh', 'distance', 'AV']:
pars.append('{}_{}'.format(p,s))
return pars
@property
def Nstars(self):
if self._Nstars is None:
N = {}
for n in self.get_model_nodes():
if n.index not in N:
N[n.index] = 1
else:
N[n.index] += 1
self._Nstars = N
return self._Nstars
@property
def systems(self):
# fix this! make sure it is unique!!!
lst = list(chain(*[c.systems for c in self.children]))
return sorted(set(lst))
def print_ascii(self, fout=None, p=None):
pardict = None
if p is not None:
pardict = self.p2pardict(p)
super(ObservationTree, self).print_ascii(fout, pardict)
def lnlike(self, p, use_cache=True):
"""
takes parameter vector, constructs pardict, returns sum of lnlikes of non-leaf nodes
"""
if use_cache and self._cache_key is not None and np.all(p==self._cache_key):
return self._cache_val
self._cache_key = p
pardict = self.p2pardict(p)
# lnlike from photometry
lnl = 0
for n in self:
if n is not self:
lnl += n.lnlike(pardict, use_cache=use_cache)
if not np.isfinite(lnl):
self._cache_val = -np.inf
return -np.inf
# lnlike from spectroscopy
for l in self.spectroscopy:
for prop,(val,err) in self.spectroscopy[l].items():
mod = self.get_leaf(l).evaluate(pardict[l], prop)
lnl += -0.5*(val - mod)**2/err**2
if not np.isfinite(lnl):
    self._cache_val = -np.inf
    return -np.inf
from utils import CreateDatasetICVLChallenge
from .BaseModel import BDNN
import copy
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
import os
import numpy as np
from . import BasicNetworks
#-----------------------------------------------------------------------------------------------------------------------
## @brief The base model including the necessary data loader for the 2018 CVPR Challenge on spectral super-resolution
#
#-----------------------------------------------------------------------------------------------------------------------
class BaseModel_ICVLChallenge(BDNN):
def __init__(self, track='RealWorld'):
super(BaseModel_ICVLChallenge, self).__init__()
self._track = track
def name(self):
return 'BaseModel_ICVLChallenge'
def _create_data_loader(self):
patchSize = self._patchSize
dataset = CreateDatasetICVLChallenge(0, patchSize, self._track)
# dataset.SetSingleChannelOnly(10)
datasetTrain = copy.deepcopy(dataset)
datasetValidate = copy.deepcopy(dataset)
self.datasetTest = copy.deepcopy(dataset)
if self._enabSave:
dataset.SaveConfig(os.path.join(self._mainPath, "dataConfig"))
# load the training and validation set into memory
if datasetTrain.InitializeSet('train') != 1:
return 1
if datasetValidate.InitializeSet('validation') != 1:
return 1
self._dataLoaderTrain = DataLoader(dataset=datasetTrain, shuffle=True, batch_size=self._batchSize)
self._dataLoaderValid = DataLoader(dataset=datasetValidate, shuffle=True, batch_size=self._batchSize)
def _get_metadata_impl(self):
# save network meta data
modelConfig = {}
modelConfig['General'] = {}
modelConfig['General']['Track'] = self._track
modelConfig['General']['BatchSize'] = self._batchSize
modelConfig['General']['PatchSize'] = self._patchSize
modelConfig['General']['Loss'] = self._name_criterion
modelConfig['Network'] = self._network.get_config()
return modelConfig
def _set_metadata_impl(self, config):
networkInfo = config['Network']
#
## Key must match the one written by _get_metadata_impl ('Track').
if 'Track' in config['General']:
    self._track = config['General']['Track']
else:
self._track = 'RealWorld'
# create the specified network
name = config['Network']['name']
print("Used network: {}".format(name))
if name == 'GenUNetNoPooling':
self._network = BasicNetworks.GenUNetNoPooling()
else:
print("Error, Unkown network type: {}".format(name))
# configure the network accordingly
self._network.set_config(networkInfo)
## @brief Process the specified input using the current model state
#
# The current model state is used to process the specified data, allFiles. The result is written to the path.
def execute_test(self, allFiles, path):
self._set_mode2exec()
#
self._patchSize = 256
print("Performing spectral reconstruction")
for fileOfChoice, img in allFiles.items():
# img, curSpecImg, fileOfChoice = self.datasetTest.GetImagePair(indImg)
print("\t-Current Image: " + fileOfChoice)
imgHeight = img.shape[0]
imgWidth = img.shape[1]
img = np.expand_dims(img, axis=3)  # append a singleton axis; the permute below moves it to the batch position
allRGB = torch.from_numpy(img)
allRGB = allRGB.permute(3, 2, 0, 1)
if self._enabCuda:
allRGB = allRGB.cuda()
# allocate final reconstruction
reconstruction = np.empty((imgHeight, imgWidth, 31))
import itertools
from collections import defaultdict
from math import ceil
import numpy as np
from pycompss.api.api import compss_wait_on
from pycompss.api.parameter import Type, COLLECTION_IN, Depth, COLLECTION_INOUT
from pycompss.api.task import task
from scipy import sparse as sp
from scipy.sparse import issparse, csr_matrix
from sklearn.utils import check_random_state
class Array(object):
""" A distributed 2-dimensional array divided in blocks.
Normally, this class should not be instantiated directly, but created
using one of the array creation routines provided.
Apart from the different methods provided, this class also supports
the following types of indexing:
- ``A[i]`` : returns a single row
- ``A[i, j]`` : returns a single element
- ``A[i:j]`` : returns a set of rows (with ``i`` and ``j`` optional)
- ``A[:, i:j]`` : returns a set of columns (with ``i`` and ``j``
optional)
- ``A[[i,j,k]]`` : returns a set of non-consecutive rows
- ``A[:, [i,j,k]]`` : returns a set of non-consecutive columns
- ``A[i:j, k:m]`` : returns a set of elements (with ``i``, ``j``,
``k``, and ``m`` optional)
Parameters
----------
blocks : list
List of lists of nd-array or spmatrix.
top_left_shape : tuple
A single tuple indicating the shape of the top-left block.
reg_shape : tuple
A single tuple indicating the shape of the regular block.
shape : tuple (int, int)
Total number of elements in the array.
sparse : boolean, optional (default=False)
Whether this array stores sparse data.
Attributes
----------
shape : tuple (int, int)
Total number of elements in the array.
_blocks : list
List of lists of nd-array or spmatrix.
_top_left_shape : tuple
A single tuple indicating the shape of the top-left block. This
can be different from _reg_shape when slicing arrays.
_reg_shape : tuple
A single tuple indicating the shape of regular blocks. Top-left and
and bot-right blocks might have different shapes (and thus, also the
whole first/last blocks of rows/cols).
_n_blocks : tuple (int, int)
Total number of (horizontal, vertical) blocks.
_sparse: boolean
True if this array contains sparse data.
"""
def __init__(self, blocks, top_left_shape, reg_shape, shape, sparse):
self._validate_blocks(blocks)
self._blocks = blocks
self._top_left_shape = top_left_shape
self._reg_shape = reg_shape
self._n_blocks = (len(blocks), len(blocks[0]))
self._shape = shape
self._sparse = sparse
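# e.g. a 5x5 ds-array with reg_shape=(2, 2) and top_left_shape=(2, 2) is
# stored as a 3x3 grid of blocks whose last block row/column holds the
# single remaining row/column of elements.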
def __str__(self):
return "ds-array(blocks=(...), top_left_shape=%r, reg_shape=%r, " \
"shape=%r, sparse=%r)" % (
self._top_left_shape, self._reg_shape, self.shape,
self._sparse)
def __repr__(self):
return "ds-array(blocks=(...), top_left_shape=%r, reg_shape=%r, " \
"shape=%r, sparse=%r)" % (
self._top_left_shape, self._reg_shape, self.shape,
self._sparse)
def __getitem__(self, arg):
# return a single row
if isinstance(arg, int):
return self._get_by_lst_rows(rows=[arg])
# list of indices for rows
elif isinstance(arg, list) or isinstance(arg, np.ndarray):
return self._get_by_lst_rows(rows=arg)
# slicing only rows
elif isinstance(arg, slice):
# slice only rows
return self._get_slice(rows=arg, cols=slice(None, None))
# we have indices for both dimensions
if not isinstance(arg, tuple):
raise IndexError("Invalid indexing information: %s" % arg)
rows, cols = arg # unpack 2-arguments
# returning a single element
if isinstance(rows, int) and isinstance(cols, int):
return self._get_single_element(i=rows, j=cols)
# all rows (slice : for rows) and list of indices for columns
elif isinstance(rows, slice) and \
(isinstance(cols, list) or isinstance(cols, np.ndarray)):
return self._get_by_lst_cols(cols=cols)
# slicing both dimensions
elif isinstance(rows, slice) and isinstance(cols, slice):
return self._get_slice(rows, cols)
raise IndexError("Invalid indexing information: %s" % str(arg))
@property
def shape(self):
"""
Total shape of the ds-array
"""
return self._shape
@staticmethod
def _validate_blocks(blocks):
if len(blocks) == 0 or len(blocks[0]) == 0:
raise AttributeError('Blocks must be a list of lists, with at least'
' an empty numpy/scipy matrix.')
row_length = len(blocks[0])
for i in range(1, len(blocks)):
if len(blocks[i]) != row_length:
raise AttributeError(
'All rows must contain the same number of blocks.')
@staticmethod
def _merge_blocks(blocks):
"""
Helper function that merges the _blocks attribute of a ds-array into
a single ndarray / sparse matrix.
"""
        b0 = blocks[0][0]
        sparse = issparse(b0)
if sparse:
ret = sp.bmat(blocks, format=b0.getformat(), dtype=b0.dtype)
else:
ret = np.block(blocks)
return ret
@staticmethod
def _get_out_blocks(n_blocks):
"""
Helper function that builds empty lists of lists to be filled as
parameter of type COLLECTION_INOUT
"""
return [[object() for _ in range(n_blocks[1])]
for _ in range(n_blocks[0])]
@staticmethod
def _broadcast_shapes(x, y):
if len(x) != 1 or len(y) != 1:
raise IndexError("shape mismatch: indexing arrays could "
"not be broadcast together with shapes %s %s" %
(len(x), len(y)))
return zip(*itertools.product(*[x, y]))
def _get_row_shape(self, row_idx):
if row_idx == 0:
return self._top_left_shape[0], self.shape[1]
if row_idx < self._n_blocks[0] - 1:
return self._reg_shape[0], self.shape[1]
# this is the last chunk of rows, number of rows might be smaller
reg_blocks = self._n_blocks[0] - 2
if reg_blocks < 0:
reg_blocks = 0
n_r = \
self.shape[0] - self._top_left_shape[0] - reg_blocks * \
self._reg_shape[0]
return n_r, self.shape[1]
def _get_col_shape(self, col_idx):
if col_idx == 0:
return self.shape[0], self._top_left_shape[1]
if col_idx < self._n_blocks[1] - 1:
return self.shape[0], self._reg_shape[1]
# this is the last chunk of cols, number of cols might be smaller
reg_blocks = self._n_blocks[1] - 2
if reg_blocks < 0:
reg_blocks = 0
n_c = \
self.shape[1] - self._top_left_shape[1] - \
reg_blocks * self._reg_shape[1]
return self.shape[0], n_c
def _iterator(self, axis=0):
# iterate through rows
if axis == 0 or axis == 'rows':
for i, row in enumerate(self._blocks):
row_shape = self._get_row_shape(i)
yield Array(blocks=[row], top_left_shape=self._top_left_shape,
reg_shape=self._reg_shape, shape=row_shape,
sparse=self._sparse)
# iterate through columns
elif axis == 1 or axis == 'columns':
for j in range(self._n_blocks[1]):
col_shape = self._get_col_shape(j)
col_blocks = [[self._blocks[i][j]] for i in
range(self._n_blocks[0])]
yield Array(blocks=col_blocks,
top_left_shape=self._top_left_shape,
reg_shape=self._reg_shape,
shape=col_shape, sparse=self._sparse)
else:
raise Exception(
"Axis must be [0|'rows'] or [1|'columns']. Got: %s" % axis)
def _get_containing_block(self, i, j):
"""
Returns the indices of the block containing coordinate (i, j)
"""
bi0, bj0 = self._top_left_shape
bn, bm = self._reg_shape
# If first block is irregular, we need to add an offset to compute the
# containing block indices
offset_i, offset_j = bn - bi0, bm - bj0
block_i = (i + offset_i) // bn
block_j = (j + offset_j) // bm
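        # e.g. with top_left_shape=(1, 1) and reg_shape=(3, 3), the offsets
        # are (2, 2) and element (3, 0) falls in block ((3+2)//3, (0+2)//3)
        # = (1, 0)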
# if blocks are out of bounds, assume the element belongs to last block
if block_i >= self._n_blocks[0]:
block_i = self._n_blocks[0] - 1
if block_j >= self._n_blocks[1]:
block_j = self._n_blocks[1] - 1
return block_i, block_j
def _coords_in_block(self, block_i, block_j, i, j):
"""
Return the conversion of the coords (i, j) in ds-array space to
coordinates in the given block (block_i, block_j) space.
"""
local_i, local_j = i, j
if block_i > 0:
reg_blocks = (block_i - 1) if (block_i - 1) >= 0 else 0
local_i = \
i - self._top_left_shape[0] - \
reg_blocks * self._reg_shape[0]
if block_j > 0:
reg_blocks = (block_j - 1) if (block_j - 1) >= 0 else 0
local_j = \
j - self._top_left_shape[1] - \
reg_blocks * self._reg_shape[1]
return local_i, local_j
def _get_single_element(self, i, j):
"""
Return the element in (i, j) as a ds-array with a single element.
"""
# we are returning a single element
        if i >= self.shape[0] or j >= self.shape[1]:
            raise IndexError("Shape is %s" % str(self.shape))
bi, bj = self._get_containing_block(i, j)
local_i, local_j = self._coords_in_block(bi, bj, i, j)
block = self._blocks[bi][bj]
        # returns a future to the single element, wrapped below in a 1x1 block
element = _get_item(local_i, local_j, block)
return Array(blocks=[[element]], top_left_shape=(1, 1),
reg_shape=(1, 1), shape=(1, 1), sparse=False)
def _get_slice(self, rows, cols):
"""
Returns a slice of the ds-array defined by the slices rows / cols.
Only steps (as defined by slice.step) with value 1 can be used.
"""
if (rows.step is not None and rows.step > 1) or \
(cols.step is not None and cols.step > 1):
raise NotImplementedError("Variable steps not supported, contact"
" the dislib team or open an issue "
"in github.")
# rows and cols are read-only
r_start, r_stop = rows.start, rows.stop
c_start, c_stop = cols.start, cols.stop
if r_start is None:
r_start = 0
if c_start is None:
c_start = 0
if r_stop is None or r_stop > self.shape[0]:
r_stop = self.shape[0]
if c_stop is None or c_stop > self.shape[1]:
c_stop = self.shape[1]
if r_start < 0 or r_stop < 0 or c_start < 0 or c_stop < 0:
raise NotImplementedError("Negative indexes not supported, contact"
" the dislib team or open an issue "
"in github.")
# get the coordinates of top-left and bot-right corners
i_0, j_0 = self._get_containing_block(r_start, c_start)
i_n, j_n = self._get_containing_block(r_stop, c_stop)
# Number of blocks to be returned
n_blocks = i_n - i_0 + 1
m_blocks = j_n - j_0 + 1
out_blocks = self._get_out_blocks((n_blocks, m_blocks))
i_indices = range(i_0, i_n + 1)
j_indices = range(j_0, j_n + 1)
for out_i, i in enumerate(i_indices):
for out_j, j in enumerate(j_indices):
top, left, bot, right = None, None, None, None
if out_i == 0:
top, _ = self._coords_in_block(i_0, j_0, r_start, c_start)
if out_i == len(i_indices) - 1:
bot, _ = self._coords_in_block(i_n, j_n, r_stop, c_stop)
if out_j == 0:
_, left = self._coords_in_block(i_0, j_0, r_start, c_start)
if out_j == len(j_indices) - 1:
_, right = self._coords_in_block(i_n, j_n, r_stop, c_stop)
boundaries = (top, left, bot, right)
fb = _filter_block(block=self._blocks[i][j],
boundaries=boundaries)
out_blocks[out_i][out_j] = fb
# Shape of the top left block
top, left = self._coords_in_block(0, 0, r_start,
c_start)
bi0 = self._reg_shape[0] - (top % self._reg_shape[0])
bj0 = self._reg_shape[1] - (left % self._reg_shape[1])
# Regular blocks shape is the same
bn, bm = self._reg_shape
out_shape = r_stop - r_start, c_stop - c_start
res = Array(blocks=out_blocks, top_left_shape=(bi0, bj0),
reg_shape=(bn, bm), shape=out_shape, sparse=self._sparse)
return res
def _get_by_lst_rows(self, rows):
"""
Returns a slice of the ds-array defined by the lists of indices in
rows.
"""
# create dict where each key contains the adjusted row indices for that
# block of rows
adj_row_idxs = defaultdict(list)
for row_idx in rows:
containing_block = self._get_containing_block(row_idx, 0)[0]
adj_idx = self._coords_in_block(containing_block, 0, row_idx, 0)[0]
adj_row_idxs[containing_block].append(adj_idx)
row_blocks = []
for rowblock_idx, row in enumerate(self._iterator(axis='rows')):
# create an empty list for the filtered row (single depth)
rows_in_block = len(adj_row_idxs[rowblock_idx])
# only launch the task if we are selecting rows from that block
if rows_in_block > 0:
row_block = _filter_row(blocks=row._blocks,
rows=adj_row_idxs[rowblock_idx],
cols=None)
row_blocks.append((rows_in_block, [row_block]))
        # now we need to merge the row blocks until they have as many rows as
        # self._reg_shape[0] (i.e. the number of rows per block)
n_rows = 0
to_merge = []
final_blocks = []
for rows_in_block, row in row_blocks:
to_merge.append(row)
n_rows += rows_in_block
# enough rows to merge into a row_block
if n_rows > self._reg_shape[0]:
out_blocks = [object() for _ in range(self._n_blocks[1])]
new_rb = _merge_rows(to_merge, out_blocks, self._reg_shape)
final_blocks.append(new_rb)
if n_rows > 0:
out_blocks = [object() for _ in range(self._n_blocks[1])]
_merge_rows(to_merge, out_blocks, self._reg_shape)
final_blocks.append(out_blocks)
return Array(blocks=final_blocks, top_left_shape=self._top_left_shape,
reg_shape=self._reg_shape,
shape=(len(rows), self._shape[1]), sparse=self._sparse)
def _get_by_lst_cols(self, cols):
"""
Returns a slice of the ds-array defined by the lists of indices in
cols.
"""
# create dict where each key contains the adjusted row indices for that
# block of rows
adj_col_idxs = defaultdict(list)
for col_idx in cols:
containing_block = self._get_containing_block(0, col_idx)[1]
adj_idx = self._coords_in_block(0, containing_block, 0, col_idx)[1]
adj_col_idxs[containing_block].append(adj_idx)
col_blocks = []
for colblock_idx, col in enumerate(self._iterator(axis='columns')):
            # create an empty list for the filtered column (single depth)
            cols_in_block = len(adj_col_idxs[colblock_idx])
            # only launch the task if we are selecting cols from that block
if cols_in_block > 0:
col_block = _filter_row(blocks=col._blocks,
rows=None,
cols=adj_col_idxs[colblock_idx])
col_blocks.append((cols_in_block, col_block))
        # now we need to merge the col blocks until they have as many cols as
        # self._reg_shape[1] (i.e. the number of cols per block)
n_cols = 0
to_merge = []
final_blocks = []
for cols_in_block, col in col_blocks:
to_merge.append(col)
n_cols += cols_in_block
# enough cols to merge into a col_block
            if n_cols > self._reg_shape[1]:
                out_blocks = [object() for _ in range(self._n_blocks[0])]
                new_rb = _merge_cols(to_merge, out_blocks, self._reg_shape)
                final_blocks.append(new_rb)
        if n_cols > 0:
            out_blocks = [object() for _ in range(self._n_blocks[0])]
            _merge_cols(to_merge, out_blocks, self._reg_shape)
            final_blocks.append(out_blocks)
        # lists are in col-order; transpose them for the correct ordering
final_blocks = list(map(list, zip(*final_blocks)))
return Array(blocks=final_blocks, top_left_shape=self._top_left_shape,
reg_shape=self._reg_shape,
shape=(self._shape[0], len(cols)), sparse=self._sparse)
def transpose(self, mode='rows'):
"""
        Returns the transpose of the ds-array following the method indicated by
        mode. 'all' uses a single task to transpose all the blocks (slow with a
        high number of blocks). 'rows' and 'columns' transpose each block of
        rows or columns independently (i.e. a task per row/col block).
        Parameters
        ----------
        mode : string, optional (default=rows)
            Transpose mode: one of 'all', 'rows' or 'columns'.
Returns
-------
dsarray : ds-array
A transposed ds-array.
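
        Examples
        --------
        A minimal sketch (uses the module-level ``array`` helper defined
        below):

        >>> import numpy as np
        >>> a = array(np.arange(6).reshape(2, 3), block_size=(2, 3))
        >>> a.transpose().shape
        (3, 2)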
"""
if mode == 'all':
n, m = self._n_blocks[0], self._n_blocks[1]
out_blocks = self._get_out_blocks((n, m))
_transpose(self._blocks, out_blocks)
elif mode == 'rows':
out_blocks = []
for r in self._iterator(axis=0):
_blocks = self._get_out_blocks(r._n_blocks)
_transpose(r._blocks, _blocks)
out_blocks.append(_blocks[0])
elif mode == 'columns':
out_blocks = [[] for _ in range(self._n_blocks[0])]
for i, c in enumerate(self._iterator(axis=1)):
_blocks = self._get_out_blocks(c._n_blocks)
_transpose(c._blocks, _blocks)
for i2 in range(len(_blocks)):
out_blocks[i2].append(_blocks[i2][0])
else:
raise Exception(
"Unknown transpose mode '%s'. Options are: [all|rows|columns]"
% mode)
blocks_t = list(map(list, zip(*out_blocks)))
bi0, bj0 = self._top_left_shape[0], self._top_left_shape[1]
bn, bm = self._reg_shape[0], self._reg_shape[1]
new_shape = self.shape[1], self.shape[0]
# notice blocks shapes are transposed
return Array(blocks_t, top_left_shape=(bj0, bi0), reg_shape=(bm, bn),
shape=new_shape, sparse=self._sparse)
def min(self, axis=0):
"""
Returns the minimum along the given axis.
Parameters
----------
axis : int, optional (default=0)
Returns
-------
min : ds-array
Minimum along axis.
"""
return apply_along_axis(np.min, axis, self)
def max(self, axis=0):
"""
Returns the maximum along the given axis.
Parameters
----------
axis : int, optional (default=0)
Returns
-------
max : ds-array
Maximum along axis.
"""
return apply_along_axis(np.max, axis, self)
def sum(self, axis=0):
"""
Returns the sum along the given axis.
Parameters
----------
axis : int, optional (default=0)
Returns
-------
sum : ds-array
Sum along axis.
"""
return apply_along_axis(np.sum, axis, self)
def mean(self, axis=0):
"""
Returns the mean along the given axis.
Parameters
----------
axis : int, optional (default=0)
Returns
-------
mean : ds-array
Mean along axis.
"""
return apply_along_axis(np.mean, axis, self)
def collect(self):
"""
Collects the contents of this ds-array and returns the equivalent
in-memory array that this ds-array represents. This method creates a
synchronization point in the execution of the application.
Warning: This method may fail if the ds-array does not fit in
memory.
Returns
-------
array : nd-array or spmatrix
The actual contents of the ds-array.
"""
self._blocks = compss_wait_on(self._blocks)
res = self._merge_blocks(self._blocks)
if not self._sparse:
res = np.squeeze(res)
return res
def array(x, block_size):
"""
Loads data into a Distributed Array.
Parameters
----------
x : spmatrix or array-like, shape=(n_samples, n_features)
Array of samples.
block_size : (int, int)
Block sizes in number of samples.
Returns
-------
dsarray : ds-array
A distributed representation of the data divided in blocks.
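
    Examples
    --------
    A minimal sketch (assumes a COMPSs runtime, or its sequential
    emulation):

    >>> import numpy as np
    >>> x = np.arange(12).reshape(4, 3)
    >>> a = array(x, block_size=(2, 2))
    >>> a.shape
    (4, 3)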
"""
sparse = issparse(x)
if sparse:
x = csr_matrix(x, copy=True)
else:
x = np.array(x, copy=True)
if len(x.shape) < 2:
raise ValueError("Input array must have two dimensions.")
bn, bm = block_size
blocks = []
for i in range(0, x.shape[0], bn):
row = [x[i: i + bn, j: j + bm] for j in range(0, x.shape[1], bm)]
blocks.append(row)
arr = Array(blocks=blocks, top_left_shape=block_size,
reg_shape=block_size, shape=x.shape, sparse=sparse)
return arr
def random_array(shape, block_size, random_state=None):
"""
    Returns a distributed array of random floats in the half-open interval
    [0.0, 1.0). Values are drawn from the "continuous uniform" distribution
    over the stated interval.
Parameters
----------
shape : tuple of two ints
Shape of the output ds-array.
block_size : tuple of two ints
Size of the ds-array blocks.
random_state : int or RandomState, optional (default=None)
Seed or numpy.random.RandomState instance to generate the random
numbers.
Returns
-------
dsarray : ds-array
Distributed array of random floats.
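
    Examples
    --------
    A minimal sketch:

    >>> x = random_array((100, 100), block_size=(25, 25), random_state=0)
    >>> x.shape
    (100, 100)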
"""
if shape[0] < block_size[0] or shape[1] < block_size[1]:
raise ValueError("Block size is greater than the array")
r_state = check_random_state(random_state)
n_blocks = (int(np.ceil(shape[0] / block_size[0])),
int(np.ceil(shape[1] / block_size[1])))
blocks = list()
for row_idx in range(n_blocks[0]):
blocks.append(list())
for col_idx in range(n_blocks[1]):
b_size0, b_size1 = block_size
if row_idx == n_blocks[0] - 1:
b_size0 = shape[0] - (n_blocks[0] - 1) * block_size[0]
if col_idx == n_blocks[1] - 1:
b_size1 = shape[1] - (n_blocks[1] - 1) * block_size[1]
seed = r_state.randint(np.iinfo(np.int32).max)
blocks[-1].append(_random_block((b_size0, b_size1), seed))
return Array(blocks, top_left_shape=block_size, reg_shape=block_size,
shape=shape, sparse=False)
def apply_along_axis(func, axis, x, *args, **kwargs):
""" Apply a function to slices along the given axis.
Execute func(a, *args, **kwargs) where func operates on nd-arrays and a
is a slice of arr along axis. The size of the slices is determined
by the blocks shape of x.
func must meet the following conditions:
- Take an nd-array as argument
- Accept `axis` as a keyword argument
- Return an array-like structure
Parameters
----------
func : function
This function should accept nd-arrays and an axis argument. It is
applied to slices of arr along the specified axis.
axis : integer
Axis along which arr is sliced. Can be 0 or 1.
x : ds-array
Input distributed array.
args : any
Additional arguments to func.
kwargs : any
Additional named arguments to func.
Returns
-------
out : ds-array
The output array. The shape of out is identical to the shape of arr,
except along the axis dimension. The output ds-array is dense
regardless of the type of the input array.
Examples
--------
>>> import dislib as ds
>>> import numpy as np
>>> x = ds.random_array((100, 100), block_size=(25, 25))
>>> mean = ds.apply_along_axis(np.mean, 0, x)
>>> print(mean.collect())
"""
if axis != 0 and axis != 1:
raise ValueError("Axis must be 0 or 1.")
tlshape = x._top_left_shape
bshape = x._reg_shape
shape = x.shape
out_blocks = list()
for block in x._iterator(axis=(not axis)):
out = _block_apply(func, axis, block._blocks, *args, **kwargs)
out_blocks.append(out)
if axis == 0:
blocks = [out_blocks]
out_tlbshape = (1, tlshape[1])
out_bshape = (1, bshape[1])
out_shape = (1, shape[1])
else:
blocks = [[block] for block in out_blocks]
out_tlbshape = (tlshape[0], 1)
out_bshape = (bshape[0], 1)
out_shape = (shape[0], 1)
return Array(blocks, top_left_shape=out_tlbshape, reg_shape=out_bshape,
shape=out_shape, sparse=False)
def load_svmlight_file(path, block_size, n_features, store_sparse):
""" Loads a SVMLight file into a distributed array.
Parameters
----------
path : string
File path.
block_size : tuple (int, int)
Size of the blocks for the output ds-array.
n_features : int
Number of features.
store_sparse : boolean
Whether to use scipy.sparse data structures to store data. If False,
numpy.array is used instead.
Returns
-------
x, y : (ds-array, ds-array)
A distributed representation (ds-array) of the X and y.
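
    Examples
    --------
    A usage sketch ("train.svm" is a hypothetical SVMLight-formatted file):

    >>> x, y = load_svmlight_file("train.svm", block_size=(100, 50),
    ...                           n_features=780, store_sparse=True)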
"""
n, m = block_size
lines = []
x_blocks, y_blocks = [], []
n_rows = 0
with open(path, "r") as f:
for line in f:
n_rows += 1
lines.append(line.encode())
if len(lines) == n:
                # position 0 of out_blocks -> X blocks, position 1 -> y block
out_blocks = Array._get_out_blocks((1, ceil(n_features / m)))
out_blocks.append([object()])
# out_blocks.append([])
_read_svmlight(lines, out_blocks, col_size=m,
n_features=n_features,
store_sparse=store_sparse)
# we append only the list forming the row (out_blocks depth=2)
x_blocks.append(out_blocks[0])
y_blocks.append(out_blocks[1])
lines = []
if lines:
out_blocks = Array._get_out_blocks((1, ceil(n_features / m)))
out_blocks.append([object()])
_read_svmlight(lines, out_blocks, col_size=m,
n_features=n_features, store_sparse=store_sparse)
# we append only the list forming the row (out_blocks depth=2)
x_blocks.append(out_blocks[0])
y_blocks.append(out_blocks[1])
x = Array(x_blocks, top_left_shape=block_size, reg_shape=block_size,
shape=(n_rows, n_features), sparse=store_sparse)
# y has only a single line but it's treated as a 'column'
y = Array(y_blocks, top_left_shape=(n, 1), reg_shape=(n, 1),
shape=(n_rows, 1), sparse=False)
return x, y
def load_txt_file(path, block_size, delimiter=","):
""" Loads a text file into a distributed array.
Parameters
----------
path : string
File path.
block_size : tuple (int, int)
Size of the blocks of the array.
delimiter : string, optional (default=",")
String that separates columns in the file.
Returns
-------
x : ds-array
A distributed representation of the data divided in blocks.
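
    Examples
    --------
    A usage sketch ("data.csv" is a hypothetical comma-separated file):

    >>> x = load_txt_file("data.csv", block_size=(100, 10))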
"""
with open(path, "r") as f:
first_line = f.readline().strip()
n_cols = len(first_line.split(delimiter))
n_blocks = ceil(n_cols / block_size[1])
blocks = []
lines = []
n_lines = 0
with open(path, "r") as f:
for line in f:
n_lines += 1
lines.append(line.encode())
if len(lines) == block_size[0]:
out_blocks = [object() for _ in range(n_blocks)]
_read_lines(lines, block_size[1], delimiter, out_blocks)
blocks.append(out_blocks)
lines = []
if lines:
out_blocks = [object() for _ in range(n_blocks)]
_read_lines(lines, block_size[1], delimiter, out_blocks)
blocks.append(out_blocks)
return Array(blocks, top_left_shape=block_size, reg_shape=block_size,
shape=(n_lines, n_cols), sparse=False)
@task(out_blocks=COLLECTION_INOUT, returns=1)
def _read_lines(lines, block_size, delimiter, out_blocks):
samples = np.genfromtxt(lines, delimiter=delimiter)
for i, j in enumerate(range(0, samples.shape[1], block_size)):
out_blocks[i] = samples[:, j:j + block_size]
@task(out_blocks={Type: COLLECTION_INOUT, Depth: 2})
def _read_svmlight(lines, out_blocks, col_size, n_features, store_sparse):
from tempfile import SpooledTemporaryFile
from sklearn.datasets import load_svmlight_file
# Creating a tmp file to use load_svmlight_file method should be more
# efficient than parsing the lines manually
tmp_file = SpooledTemporaryFile(mode="wb+", max_size=2e8)
tmp_file.writelines(lines)
tmp_file.seek(0)
x, y = load_svmlight_file(tmp_file, n_features)
if not store_sparse:
x = x.toarray()
    # tried also converting to csc/ndarray first for faster splitting, but it's
    # not worth it. Position 0 contains the X blocks
for i in range(ceil(n_features / col_size)):
out_blocks[0][i] = x[:, i * col_size:(i + 1) * col_size]
# Position 1 contains the y block
out_blocks[1][0] = y.reshape(-1, 1)
@task(returns=1)
def _get_item(i, j, block):
"""
Returns a single item from the block. Coords must be in block space.
"""
return block[i, j]
@task(blocks={Type: COLLECTION_IN, Depth: 2}, returns=1)
def _filter_row(blocks, rows, cols):
"""
Returns an array resulting of selecting rows:cols of the input blocks
"""
data = Array._merge_blocks(blocks)
if issparse(blocks[0][0]):
# sparse indexes element by element we need to do the cartesian
# product of indices to get all coords
rows, cols = zip(*itertools.product(*[rows, cols]))
if rows is None:
return data[:, cols]
elif cols is None:
return data[rows, :]
return data[rows, cols]
@task(blocks={Type: COLLECTION_IN, Depth: 2},
out_blocks={Type: COLLECTION_INOUT, Depth: 1})
def _merge_rows(blocks, out_blocks, blocks_shape):
"""
Merges the blocks into a single list of blocks where each block has bn
as number of rows (the number of cols remains the same per block).
"""
bn, bm = blocks_shape
data = Array._merge_blocks(blocks)
for j in range(0, ceil(data.shape[1] / bm)):
out_blocks[j] = data[:bn, j * bm: (j + 1) * bm]
@task(blocks={Type: COLLECTION_IN, Depth: 1},
out_blocks={Type: COLLECTION_INOUT, Depth: 1})
def _merge_cols(blocks, out_blocks, blocks_shape):
"""
Merges the blocks into a single list of blocks where each block has bn
as number of rows (the number of cols remains the same per block).
"""
bn, bm = blocks_shape
data = Array._merge_blocks(blocks)
for i in range(0, ceil(data.shape[0] / bn)):
out_blocks[i] = data[i * bn: (i + 1) * bn, :bm]
@task(returns=1)
def _filter_block(block, boundaries):
"""
Returns the slice of block defined by boundaries.
Boundaries are the (x, y) coordinates of the top-left corner (i_0, j_0) and
the bot-right one (i_n, j_n).
"""
i_0, j_0, i_n, j_n = boundaries
res = block[i_0:i_n, j_0:j_n]
return res
@task(blocks={Type: COLLECTION_IN, Depth: 2},
out_blocks={Type: COLLECTION_INOUT, Depth: 2})
def _transpose(blocks, out_blocks):
for i in range(len(blocks)):
for j in range(len(blocks[i])):
out_blocks[i][j] = blocks[i][j].transpose()
@task(returns=np.array)
def _random_block(shape, seed):
    np.random.seed(seed)
    # assumption: returns uniform floats in [0.0, 1.0), matching
    # random_array's docstring; the original body was truncated here
    return np.random.random(size=shape)
'''This module provides the figures for the accompanying Jupyter notebook'''
import matplotlib.pyplot as plt, numpy as np, pandas as pd
from IPython.display import display
from scipy.interpolate import lagrange, interp1d, Akima1DInterpolator, CubicSpline, PchipInterpolator, CubicHermiteSpline
def figure1():
plt.figure(figsize=(6.0, 9.0))
xi = np.arange(0, 10, 2) + np.random.random(5) * 2.0
yi = 1.5 + np.cos(xi) - np.cos(np.pi * xi) / 4.0
x = np.linspace(0, 10, 51)
y = 1.5 + np.cos(x) - np.cos(np.pi * x) / 4.0
plt.subplot(4, 2, (1, 4))
plt.axhline(0.0, color='k', lw=0.5)
plt.plot(x, y, ':k', xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('original')
plt.xticks([], [])
for figure, kind in enumerate(('zero', 'linear', 'quadratic', 'cubic')):
plt.subplot(4, 2, figure + 5)
plt.axhline(0.0, color='k', lw=0.5)
spline = interp1d(xi, yi, kind=kind)
for i in range(4):
x = np.linspace(xi[i], xi[i + 1], 51)
plt.plot(x, spline(x), ':')
if figure > 1:
plt.xlabel('$x$')
else:
plt.xticks([], [])
if figure % 2 == 0: plt.ylabel('$y$')
plt.plot(xi, yi, 'ok')
plt.title(kind + ' spline')
return 'Types of splines'
def figure2():
xi = np.array([-5.0, -4.0, -3.0, 3.0, 4.0, 5.0])
yi = np.array([1.0, 1.0, 2.0, -1.0, 1.0, 1.0])
spline = Akima1DInterpolator(xi, yi)
x = np.linspace(-5.5, 5.5, 111)
y = spline(x)
plt.axhline(0., color='k', lw=.5); plt.axvline(0., color='k', lw=.5)
plt.plot(x, y, '-')
plt.plot(xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('Akima1DInterpolator')
def figure3():
xi = np.array([-5.0, -4.0, -3.0, 3.0, 4.0, 5.0])
yi = np.array([1.0, 1.0, 2.0, -1.0, 1.0, 1.0])
x = np.linspace(-5.5, 5.5, 111)
plt.axhline(0., color='k', lw=.5); plt.axvline(0., color='k', lw=.5)
for bc_type in ('not-a-knot', 'periodic', 'clamped', 'natural'):
spline = CubicSpline(xi, yi, bc_type=bc_type)
y = spline(x)
plt.plot(x, y, '-', label=bc_type)
plt.plot(xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('CubicSpline')
plt.legend()
def figure4():
xi = np.array([-5.0, -4.0, -3.0, 3.0, 4.0, 5.0])
yi = np.array([1.0, 1.0, 2.0, -1.0, 1.0, 1.0])
spline = PchipInterpolator(xi, yi)
x = np.linspace(-5.5, 5.5, 111)
y = spline(x)
plt.axhline(0., color='k', lw=.5); plt.axvline(0., color='k', lw=.5)
plt.plot(x, y, '-')
plt.plot(xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('PchipInterpolator')
def figure5():
xi = np.array([-5.0, -4.0, -3.0, 3.0, 4.0, 5.0])
yi = np.array([1.0, 1.0, 2.0, -1.0, 1.0, 1.0])
dyi = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
spline = CubicHermiteSpline(xi, yi, dyi)
x = np.linspace(-5.5, 5.5, 111)
y = spline(x)
plt.axhline(0., color='k', lw=.5); plt.axvline(0., color='k', lw=.5)
plt.plot(x, y, '-')
plt.plot(xi, yi, 'ok')
plt.plot(xi[np.newaxis, :] + np.array([[-.25], [.25]]), yi[np.newaxis, :] + np.array([[-.25], [.25]]), '-k')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('CubicHermiteSpline')
def figure6():
xi = np.arange(0, 10, 2) + np.random.random(5) * 2.0
yi = 1.5 + np.cos(xi) - np.cos(np.pi * xi) / 4.0
spline = interp1d(xi, yi, kind='nearest', fill_value='extrapolate')
x = np.linspace(0, 10, 201)
y = 1.5 + np.cos(x) - np.cos(np.pi * x) / 4.0
plt.axhline(0.0, color='k', lw=0.5)
plt.plot(x, y, ':k')
plt.plot(x, spline(x), '-')
plt.plot(xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('Nearest-neighbor spline')
def figure7():
from scipy.interpolate import interp1d
xi, yi = np.array([0.0, 1.0, 2.0, 3.0]), np.array([0.0, 0.5, 2.0, 0.5])
spline = interp1d(xi, yi, kind='nearest', fill_value='extrapolate')
x = np.linspace(-0.2, 3.2, 103)
plt.axhline(0.0, color='k', lw=0.5); plt.axvline(0.0, color='k', lw=0.5)
plt.plot(xi, yi, 'ok')
plt.plot(x, spline(x), ':k')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('Example')
plt.grid(True); plt.ylim(-3.0, 3.0)
def figure8():
xi = np.arange(0, 10, 2) + np.random.random(5) * 2.0
yi = 1.5 + np.cos(xi) - np.cos(np.pi * xi) / 4.0
spline = interp1d(xi, yi, kind='linear', fill_value='extrapolate')
x = np.linspace(0, 10, 201)
y = 1.5 + np.cos(x) - np.cos(np.pi * x) / 4.0
plt.axhline(0.0, color='k', lw=0.5)
plt.plot(x, y, ':k')
plt.plot(x, spline(x), '-')
plt.plot(xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('Linear spline')
def figure9():
xi = np.array([3.0, 7.0])
yi = 1.5 + np.cos(xi) - np.cos(np.pi * xi) / 4.0
x = np.linspace(0, 10, 201)
y = 1.5 + np.cos(x) - np.cos(np.pi * x) / 4.0
plt.axhline(0.0, color='k', lw=0.5)
plt.plot(x, y, ':k')
plt.plot([0.0, 10.0], [yi[0], yi[0]], '-', label='$q_i(x)$')
plt.plot([0.0, 10.0], [yi[1], yi[1]], '-', label='$q_{i+1}(x)$')
plt.plot(xi, yi, 'ok')
plt.xlabel('$x$'); plt.ylabel('$y$')
plt.title('Weighted averaging')
plt.legend()
def figure10():
xi = np.array([3.0, 7.0])
yi = 1.5 + np.cos(xi) - np.cos(np.pi * xi) / 4.0
x = np.linspace(0, 10, 201)
y = 1.5 + np.cos(x) - np.cos(np.pi * x) / 4.0
plt.figure(figsize=(6.0, 6.0))
plt.subplot(3, 1, (1, 2))
plt.axhline(0.0, color='k', lw=0.5)
plt.plot(x, y, ':k')
plt.plot([0.0, 10.0], [yi[0], yi[0]], '-', label='$q_i(x)$')
plt.plot([0.0, 10.0], [yi[1], yi[1]], '-', label='$q_{i+1}(x)$')
plt.plot(xi, yi, 'ok')
plt.ylabel('$y$')
plt.title('Weighted averaging')
plt.legend(); plt.xticks([], [])
plt.subplot(3, 1, 3)
plt.plot([0.0, xi[0], xi[1], 10.0], [1.0, 1.0, 0.0, 0.0], '-', label='$w_i(x)$')
plt.plot([0.0, xi[0], xi[1], 10.0], [0.0, 0.0, 1.0, 1.0], '-', label='$1-w_i(x)$')
plt.xlabel('$x$'); plt.ylabel('$w$')
plt.legend()
return 'Weighted averaging'
def figure11():
    xi = np.array([3.0, 7.0])
import numpy as np
import matplotlib.pyplot as plt
from pyml.logger import logger
from pyml.metrics.classification import precision_score
from pyml.preprocessing import StandardScaler
import math
def sigmoid(x):
"""
Compute the sigmoid of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- sigmoid(x)
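
    Example (a quick sanity check):
    >>> sigmoid(0.0)
    0.5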
"""
s = 1/(1+np.exp(-x))
return s
def nn_model_test_case():
np.random.seed(1)
X_assess = np.random.randn(2, 3)
Y_assess = np.random.randn(1, 3)
return X_assess, Y_assess
def random_mini_batches(X, Y, mini_batch_size = 64, seed = None):
"""
X : shape(n_features, n_samples)
Y : shape(1, n_samples)
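
    Example (sketch): with m = 130 samples and mini_batch_size = 64 this
    yields three mini-batches of 64, 64 and 2 samples:
        mini_batches = random_mini_batches(X, Y, mini_batch_size=64, seed=0)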
"""
if seed is not None:
np.random.seed(seed)
m = X.shape[1]
mini_batches = []
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((1,m))
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitioning
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[:, k * mini_batch_size : (k+1) * mini_batch_size]
mini_batch_Y = shuffled_Y[:, k * mini_batch_size : (k+1) * mini_batch_size]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]
mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : m]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
class MLPClassifier():
    def __init__(self, learning_rate=1.2, hidden_size=4, num_iterations=10000, optimizer='gd', mini_batch=64, random_state=3, print_intervel=10):
np.random.seed(random_state)
self.hidden_size = hidden_size
self.num_iterations = num_iterations
self.parameters = {}
self.optimizer = optimizer
self.learning_rate = learning_rate
self.mini_batch = mini_batch
self.information = {
'train_loss' : [],
'valid_loss' : [],
'cost' : []
}
self.mini_batches = []
self.current_mini_batch_index = 0
self.print_intervel = print_intervel
# self.train_X
# self.train_Y
# self.valid_X
# self.valid_Y
def initialize_parameters(self, n_x, n_h, n_y):
"""
        Initialize the network parameters.
"""
        np.random.seed(2)
import copy
import numpy as np
from yamate.utils import mathroutines as mr
from yamate.materials import material
class Properties:
names = [
"mu", "nu", "Bulk",
"kfa", "kc", "keta", "Sy0", "kh",
"s0", "scv", "sg", "sz", "sb",
"knh", "kHiso", "kcH", "FlagHardening",
"knd", "km", "kR", "kg", "kS", "kN", "threshold",
"FlagPlasDam", "FlagHidrDam", "params", "alpha_guess"]
def __init__(self,
mu=None, nu=None, Bulk=None,
kfa=None, kc=None, keta=None, Sy0=None, kh=None,
s0=None, scv=None, sg=None, sz=None, sb=None,
knh=None, kHiso=None, kcH=None, FlagHardening=1,
knd=None, km=None, kR=None, kg=None, kS=None, kN=None, threshold=None,
FlagPlasDam=1, FlagHidrDam=1, params = np.ones(3), alpha_guess=0.0
):
# Hyperelastic
self.mu = mu
self.nu = nu
self.Bulk = Bulk
#Viscoplastic
self.kfa = kfa
self.kc = kc
self.keta = keta
self.Sy0 = Sy0
self.kh = kh
self.s0 = s0
self.scv = scv
self.sg = sg
self.sz = sz
self.sb = sb
# Isotropic Hardening
self.FlagHardening = FlagHardening
self.knh = knh
self.kHiso = kHiso
self.kcH = kcH
# Damage
self.FlagPlasDam = FlagPlasDam
self.threshold = threshold
self.kS = kS
self.kN = kN
self.FlagHidrDam = FlagHidrDam
self.knd = knd
self.km = km
self.kR = kR
self.kg = kg
# Numerical Integration Parameters
self.params = params
# Tolerance Local-Newton Parameters
self.alpha_guess = alpha_guess
class VariationalViscoHydrolysis(material.Material):
name = "variational_visco_hydro"
def __init__(self, props={}):
self.state.Fpn = np.eye(3)
self.state.vin = np.zeros(10)
self.state.vin[0] = 1.0
self.state.timen = 0.0
self.properties = Properties(**props)
def hencky(self, props, etr, Ei):
eps = np.zeros(3)
G = props.mu
if ( etr.shape == (3,) ) :
eps = np.array([ etr[0] , etr[1], etr[2]])
else :
eps[0] = mr.tensor_inner_product(etr,Ei[:,:,0])
eps[1] = mr.tensor_inner_product(etr,Ei[:,:,1])
eps[2] = mr.tensor_inner_product(etr,Ei[:,:,2])
vdWtr = 2 * G * eps
dWtr = np.eye(3)
dWtr[0,0] = vdWtr[0]
dWtr[1,1] = vdWtr[1]
dWtr[2,2] = vdWtr[2]
d2Wtr = 2*G * np.eye(3)
energye = G*(eps[0]**2 + eps[1]**2 + eps[2]**2)
return dWtr, d2Wtr, energye
def kappa_functions(self, props, alpha):
if props.FlagHardening == 1 :
kappa = props.kHiso * alpha
dkappa = props.kHiso
energyp = 0.5 * props.kHiso * (alpha ** 2.0)
elif props.FlagHardening == 2 :
kappa = props.kHiso * ( 1.0 - np.exp(-props.knh*alpha) ) + props.kcH * alpha
dkappa = props.kHiso * props.knh * np.exp(-props.knh * alpha) + props.kcH
energyp = props.kHiso * alpha + ( props.kHiso * (np.exp(-props.knh*alpha) - 1)) / props.knh + 0.5 * props.kcH * alpha**2.0
elif props.FlagHardening == 3:
            kappa = props.kHiso * (np.exp(props.knh * alpha) - 1)
            dkappa = props.kHiso * props.knh * (np.exp(props.knh * alpha))
            energyp = props.kHiso * ((np.exp(props.knh * alpha) - 1) / props.knh - alpha)
else:
raise Exception("FlagHardening is not correctly defined")
return kappa, dkappa, energyp
def vol_functions(self, props, J):
# G = props.mu
Bulk = props.Bulk
dUdJ = (1.0/J)*(Bulk)*np.log(J)
energyv = 0.5 * (Bulk) * ( np.log(J) ** 2.0 )
return dUdJ, energyv
def exp_matrix_sym_3x3(self, M):
eigenvalues, eigenvectors = np.linalg.eigh( M )
expM = 0.0e0
for k in range(3):
expM = expM + np.exp(eigenvalues[k]) * mr.tensor_product(eigenvectors[:,k], eigenvectors[:,k])
return expM
def visco_arrasto(self, props, alpha):
s0 = props.s0
scv = props.scv
sg = props.sg
sz = props.sz
sb = props.sb
R = alpha
fArr = scv + np.exp(-sz*R) * ( (s0-scv)*np.cosh(sb*R)+sg*np.sinh(sb*R))
c1 = (s0-scv)*sb - sg*sz
c2 = sg*sb - (s0-scv)*sz
c3 = c1 * sb - sz * c2
c4 = c2 * sb - sz * c1
dfArr = np.exp(-sz*R)*(c1*np.sinh(sb*R)+c2*np.cosh(sb*R))
d2fArr = np.exp(-sz*R) * (c3*np.cosh(sb*R) + c4*np.sinh(sb*R))
return fArr, dfArr, d2fArr
def hydro_func(self, props, vin, vin1, deltat):
Sy0 = props.Sy0
m = props.km
n = props.knd
kR = props.kR
g = props.kg
kS = props.kS
kN = props.kN
theta = props.params[0]
gamma = props.params[1]
# zeta = props.params[2]
dpn = vin[1]
dhn = vin[2]
alphan = vin[3]
Yn = vin[4]
# dpn1 = pvin1[1]
dhn1 = vin1[2]
alphan1 = vin1[3]
Yn1 = vin1[4]
# Ddp = dpn1-dpn
Ddh = dhn1-dhn
delta_alpha = alphan1-alphan
dn = dpn + dhn
# dn1 = dn + (Ddp + Ddh)
Ytheta = (1-theta)*Yn + theta*Yn1
Ygamma = (1-gamma)*Yn + gamma*Yn1
# Yzeta = (1-zeta)*Yn + zeta*Yn1
dtheta = dn + theta*((delta_alpha*(Ytheta**kS)/kN) + Ddh)
TERM1= -(Yn1+g) + ( kR / (((1-dtheta)**n) * ((Ygamma+g)**(m-1)))) * (Ddh/deltat)
TERM2 = theta*deltat * ( ( n*kR / (2*( (1-dtheta)**(n+1) )*( (Ygamma+g)**(m-1) )) ) * ( (Ddh/deltat)**2))
TERM3 = deltat*(-Sy0*delta_alpha/deltat)
VARS = TERM1 + TERM2 + TERM3
return VARS
def compute_expressions(self, props, vin, vin1, deltat):
Sy0 = props.Sy0
m = props.km
n = props.knd
kR = props.kR
g = props.kg
kS = props.kS
kN = props.kN
keta = props.keta
kc = props.kc
theta = props.params[0]
gamma = props.params[1]
dpn = vin[1]
dhn = vin[2]
alphan = vin[3]
Yn = vin[4]
dpn1 = vin1[1]
dhn1 = vin1[2]
alphan1 = vin1[3]
Yn1 = vin1[4]
Ddp = dpn1-dpn
Ddh = dhn1-dhn
delta_alpha = alphan1-alphan
dn = dpn + dhn
dn1 = dn + (Ddp + Ddh)
Ytheta = (1-theta)*Yn + theta*Yn1
Ygamma = (1-gamma)*Yn + gamma*Yn1
dtheta = dn + theta*((delta_alpha*(Ytheta**kS)/kN) + Ddh)
fArr, dfArr, _ = self.visco_arrasto(props, alphan1)
kappa, _, _ = self.kappa_functions(props, alphan1)
FATOR = (kR/2.0) * (Ddh/deltat)**2.0
FG = ( (1-dn1)
+ deltat * (delta_alpha/deltat) * ((Yn1**(kS))/kN)
- deltat * Sy0*(delta_alpha/deltat)*kS*delta_alpha*((Yn1**(kS-1.0))/kN)
+ deltat * FATOR * gamma * (1.0-m)/( ((1.0-dtheta)**(n))*(Ygamma+g)**(m))
+ deltat * FATOR * theta * n/( ((1.0-dtheta)**(n+1))*(Ygamma+g)**(m-1.0))
* theta*kS*delta_alpha*((Ytheta**(kS-1.0))/kN)
)
FA = FG*kappa + (1-dn1)*Sy0 + fArr*(delta_alpha/(deltat*kc))**(keta)
FB = ( (kc/(keta+1))*((delta_alpha/(deltat*kc))**(keta+1))*dfArr
- Sy0*(delta_alpha/deltat)*((Yn1**(kS))/kN)
+ FATOR * theta * n/( ((1-dtheta)**(n+1))*(Ygamma+g)**(m-1))*((Ytheta**(kS))/kN)
)
return FA, FB, FG
def rm_functions(self, dWede , M , props, vin, vin1, deltat):
VFun = np.empty(2)
FA, FB, FG = self.compute_expressions(props, vin, vin1, deltat)
Seq = mr.tensor_inner_product(dWede,M)
VFun[0] = -FG * Seq + FA + deltat * FB
Vars = self.hydro_func(props, vin, vin1, deltat)
VFun[1] = Vars
return VFun
def compute_hydrolytic(self, props,vin,vin1,deltat):
m = props.km
n = props.knd
kR = props.kR
g = props.kg
kS = props.kS
kN = props.kN
theta = props.params[0]
gamma = props.params[1]
pvin=vin.copy()
pvin1=vin1.copy()
dpn = pvin[1]
dhn = pvin[2]
alphan = pvin[3]
Yn = pvin[4]
dpn1 = pvin1[1]
dhn1 = pvin1[2]
alphan1 = pvin1[3]
Yn1 = pvin1[4]
Ddp = dpn1-dpn
Ddh = dhn1-dhn
delta_alpha = alphan1-alphan
dn = dpn + dhn
Ygamma = (1-gamma)*Yn + gamma*Yn1
Ytheta = (1-theta)*Yn + theta*Yn1
Ddh = (( ( ((1-dn)**n) * ((Ygamma+g)**(m)) ) ) /kR ) * deltat
dn1 = dn + (Ddp + Ddh)
dtheta = dn + theta*((delta_alpha*(Ytheta**kS)/kN) + Ddh)
DELTA = 0.0
pvin1[0] = 1 - dn1
pvin1[2] = dhn + Ddh
FVAL = self.hydro_func(props, pvin, pvin1, deltat)
erro = 1.0
TOL = 1.0e-6
cont = 0
while (erro > TOL):
Kdh = (
+ (kR/( ((1-dtheta)**n) * (Ygamma+g)**(m-1)) ) * ((1/deltat) + theta*(n/(1-dtheta))*(Ddh/deltat))
+ theta* (n*kR/( ((1-dtheta)**(n+2)) * (Ygamma+g)**(m-1)) ) * ( (1-dtheta)*(Ddh/deltat) +theta*deltat*((n+1)/2)*((Ddh/deltat)**2))
)
DELTA = - FVAL / Kdh
Ddh = Ddh + DELTA
pvin1[2] = pvin[2] + Ddh
FVAL = self.hydro_func(props, pvin, pvin1, deltat)
erro = abs(FVAL)
cont=cont+1
if ( (cont > 20) or (Ddh < 0.0) ) :
                print("compute_hydrolytic: Your circuit's dead, there's something wrong. Can you hear me, <NAME>?")
quit()
return
VARS = Ddh
return VARS
def resid_functions(self, epstr, M, Ea, props, J, vin, vin1, deltat, delta_alpha, Ddh):
# dWede = np.zeros((3,3))
# dWedej = np.zeros((3,3))
# dWe2de2j= np.zeros((3,3))
eps= np.zeros((3,3))
dummy= np.zeros((3))
kS = props.kS
kN = props.kN
zeta = props.params[2]
pvin1=vin1.copy()
pvin=vin.copy()
dpn = pvin[1]
dhn = pvin[2]
alphan = pvin[3]
Yn = pvin[4]
dpn1 = pvin1[1]
dhn1 = pvin1[2]
alphan1 = pvin1[3]
Yn1 = pvin1[4]
Ddp = dpn1-dpn
alphan1 = alphan + delta_alpha
eps = epstr - delta_alpha*M
dWedej, _, energye = self.hencky(props, eps, Ea)
dummy[0], dummy[1], energyp = self.kappa_functions(props, alphan1)
dummy[0], energyv = self.vol_functions(props, J)
Yn1 = energye + energyv + energyp
Yzeta = (1-zeta)*Yn+zeta*Yn1
Ddp=delta_alpha*(Yzeta**kS)/kN
dpn1=dpn+Ddp
dhn1=dhn+Ddh
pvin1[1] = dpn1
pvin1[2] = dhn1
pvin1[3] = alphan1
pvin1[4] = Yn1
dWede= (
+ dWedej[0,0]*Ea[:,:,0]
+ dWedej[1,1]*Ea[:,:,1]
+ dWedej[2,2]*Ea[:,:,2]
)
VFun = self.rm_functions(dWede, M , props, pvin, pvin1, deltat)
return VFun
def fixed_point_search(self, epstr, M, Ea, props, J, vin, vin1, deltat, DELTA, flag_where):
dummy = np.zeros(3)
kS = props.kS
kN = props.kN
zeta = props.params[2]
TOL = 1.0e-6
pvin1=vin1.copy()
pvin=vin.copy()
dpn = pvin[1]
dhn = pvin[2]
alphan = pvin[3]
Yn = pvin[4]
dpn1 = pvin1[1]
dhn1 = pvin1[2]
alphan1 = pvin1[3]
Yn1 = pvin1[4]
Ddp = dpn1-dpn
Ddh = dhn1-dhn
delta_alpha = DELTA[0]
erro = 1
cont = 1
conti = 1
delta_alpha0=delta_alpha
VFun = self.resid_functions(epstr, M, Ea, props, J, vin, vin1, deltat, delta_alpha, Ddh)
        while ((VFun[0] > 0.0e0) and (delta_alpha >= 1.0e-16)):
delta_alpha=delta_alpha*1.0e-1
VFun = self.resid_functions(epstr, M, Ea, props, J, vin, vin1, deltat, delta_alpha, Ddh)
delta_alpha0=delta_alpha
if ((VFun[0] > 0.0e0) and (abs(delta_alpha) <= 1.0e-16)):
delta_alpha =1.0e-16
else:
while ((erro > TOL) and (cont < 20)):
fator = 1
# Search for a positive residue
while (VFun[0] < 0.0e0):
delta_alpha=delta_alpha0*((10)**fator)
VFun = self.resid_functions(epstr, M, Ea, props, J, vin, vin1, deltat, delta_alpha, Ddh)
fator=fator+1
a=delta_alpha0
b=delta_alpha
c=0.5e0*(a+b)
flag_restart = 1
conti = 1
                # ! BEGIN - Bisection Method - Finds delta_alpha with fixed Ddh
while (flag_restart == 1):
VFun = self.resid_functions(epstr, M, Ea, props, J, vin, vin1, deltat, c, Ddh)
if (VFun[0] < 0.0e0):
a = c
else:
b = c
if (abs(VFun[0]) <= TOL) :
flag_restart = 0
else:
conti=conti+1
if ((0.5e0*abs(a-b) < 1.0e-16) or (conti >= 50)):
                            if (conti>=50):
                                print("Bisection method error")
                                exit()
else:
VFun = self.resid_functions(epstr, M, Ea, props, J, vin, vin1, deltat, a, Ddh)
DELTA = [a, Ddh]
return DELTA
else:
c=0.5e0*(a+b)
                # ! END - Bisection Method
# ! BEGIN - Newton's method - Search for Ddh with fixed delta_alpha
delta_alpha = c
alphan1 = alphan + delta_alpha
eps = epstr - delta_alpha*M
_, _, energye = self.hencky(props, eps, Ea)
dummy[0], dummy[1], energyp = self.kappa_functions(props, alphan1)
dummy[0], energyv = self.vol_functions(props, J)
Yn1 = energye + energyv + energyp
Yzeta = (1-zeta)*Yn+zeta*Yn1
Ddp=delta_alpha*(Yzeta**kS)/kN
dpn1=dpn+Ddp
dhn1=dhn+Ddh
pvin1[1] = dpn1
pvin1[2] = dhn1
pvin1[3] = alphan1
pvin1[4] = Yn1
Ddh = self.compute_hydrolytic(props, pvin, pvin1, deltat)
VFun = self.resid_functions(epstr, M, Ea, props, J, vin, vin1, deltat, c, Ddh)
erro = mr.norm(VFun)
cont = cont + 1
if ((delta_alpha < 1.0e-16) or (Ddh < 0.0e0) or (cont > 20)):
                print('fixed_point_search: ERROR - invalid delta_alpha/Ddh or iteration limit reached')
return
DELTA = [delta_alpha, Ddh]
return DELTA
def return_mapping(self, etr, Ea, M, J, props, vin, vin1, deltat, flag_where):
dummy = np.zeros(3)
VARS = np.empty(4)
kS = props.kS
kN = props.kN
zeta = props.params[2]
pvin6 = vin[5]
if (pvin6 == 0 ):
alpha_guess = props.alpha_guess
else:
alpha_guess = pvin6 * 1.0e-3
if (alpha_guess < 1.0e-16):
alpha_guess = 1.0e-16
DELTA = 0.0e0
# !===========================================================
# !===========================================================
vetr = etr.copy()
pvin1=vin1.copy()
pvin=vin.copy()
dpn = pvin[1]
dhn = pvin[2]
alphan = pvin[3]
Yn = pvin[4]
dpn1 = pvin1[1]
dhn1 = pvin1[2]
alphan1 = pvin1[3]
Yn1 = pvin1[4]
Ddp = dpn1-dpn
Ddh = dhn1-dhn
Ddh = 0
delta_alpha = alpha_guess
alphan1 = 0.0e0
alphan1 = alphan+delta_alpha
dWedej, _, dummy[0] = self.hencky(props, vetr, Ea)
dWede = (
+ dWedej[0,0]*Ea[:,:,0]
+ dWedej[1,1]*Ea[:,:,1]
+ dWedej[2,2]*Ea[:,:,2]
)
epstr=0.e0
for k in range(3):
            epstr = epstr + etr[k]*Ea[:,:,k]
DELTA = [delta_alpha, Ddh]
DELTA = self.fixed_point_search(epstr, M, Ea, props, J, vin, vin1, deltat, DELTA, flag_where)
delta_alpha = DELTA[0]
Ddh = DELTA[1]
alphan1 = alphan + delta_alpha
eps = epstr - delta_alpha*M
dWedej, _, energye = self.hencky(props, eps, Ea)
dummy[0], dummy[1], energyp = self.kappa_functions(props, alphan1)
dummy[0], energyv = self.vol_functions(props, J)
Yn1 = energye + energyv + energyp
Yzeta = (1-zeta)*Yn+zeta*Yn1
Ddp=delta_alpha*(Yzeta**kS)/kN
dpn1=dpn+Ddp
dhn1=dhn+Ddh
dWede = (
+ dWedej[0,0]*Ea[:,:,0]
+ dWedej[1,1]*Ea[:,:,1]
+ dWedej[2,2]*Ea[:,:,2]
)
VARS[0]=alphan1
VARS[1]=Ddp
VARS[2]=Ddh
VARS[3]=Yn1
return VARS, dWede
class VariationalViscoHydrolysisAxi(VariationalViscoHydrolysis):
def calculate_state(self, F, time=None, **kwargs):
trial_state = copy.deepcopy(self.state)
if time == 0.0: return trial_state
trial_state.F = copy.deepcopy(F)
Fn1 = copy.deepcopy(F)
I = np.eye(3)
ctr=np.empty((3))
vdWede = np.empty((3))
dWede = np.empty((3,3))
etr = np.empty((3,))
# ! -----------------------------------------------------------------------------------
if ( np.isnan(Fn1[0,0]) ) :
print('Fn1[0,0] is NAN')
trial_state.cauchy_stress[:] = -np.log(0.0)
trial_state.error = True
return trial_state
vin = copy.deepcopy( self.state.vin)
vin1 = copy.deepcopy( self.state.vin)
dn = 1.0e0 - vin[0]
dpn = vin[1]
dhn = vin[2]
alphan = vin[3]
timen = copy.deepcopy( self.state.timen)
Fpn = copy.deepcopy( self.state.Fpn)
timen1 = time
deltat = timen1 - timen
assert deltat != 0.0
Sy0 = self.properties.Sy0
J = np.linalg.det(Fn1)
Cn1 = np.matmul(np.transpose(Fn1), Fn1)
Fn1_iso = (J**(-1.0e0/3.0e0))*Fn1
Cn1_iso = np.matmul(np.transpose(Fn1_iso), Fn1_iso)
Fpn_inv = np.linalg.inv(Fpn)
Ctr_iso= np.matmul(
np.transpose(Fpn_inv),
np.matmul(Cn1_iso, Fpn_inv)
)
eigenvalues, eigenvectors = np.linalg.eigh(Ctr_iso)
ctr = eigenvalues[:]
Ea = np.empty(shape=(3,3,3))
for k in range(3):
Ea[:,:,k] = mr.tensor_product( eigenvectors[:,k], eigenvectors[:,k])
etr = 0.5e0*np.log(eigenvalues)
dWtrj, _, energye = self.hencky(self.properties, etr, Ea)
kappa, _, energyp = self.kappa_functions(self.properties, alphan)
dUdJ, energyv = self.vol_functions(self.properties, J)
Yn1 = energye + energyv + energyp
vin1[4] = Yn1
Ddh = self.compute_hydrolytic(self.properties, vin, vin1, deltat)
Ddp = 0
dn1 = dn + (Ddp + Ddh)
delta_alpha=0
dhn1 = dhn + Ddh
vin1[0] = 1.0e0-dn1
vin1[2] = dhn1
vin1[3] = vin[3]
if (np.isnan( vin1[2])) :
print('vin1(3) is NAN')
trial_state.cauchy_stress[:] = -np.log(0.0)
trial_state.error = True
return trial_state
FA, FB, FG = self.compute_expressions(self.properties, vin, vin1, deltat)
dWtr = (
+ dWtrj[0,0]*Ea[:,:,0]
+ dWtrj[1,1]*Ea[:,:,1]
+ dWtrj[2,2]*Ea[:,:,2]
)
devdWtr = mr.deviatoric(dWtr)
norma=np.sqrt((mr.tensor_inner_product(dWtr, dWtr)))
if (norma == 0):
M = I
else:
norma=np.sqrt(mr.tensor_inner_product(devdWtr, devdWtr))
M = np.sqrt(3.0e0/2.0e0)*devdWtr/norma
Ttrial = mr.tensor_inner_product(dWtr, M)
finelast = kappa + ((1-dn1)*Sy0 + deltat*FB )/FG
finelast2 = (FA + deltat*FB)/FG
ratio = abs(finelast/finelast2)
if (abs(ratio-1.0e0) > 1.0e-8) :
print('finelast ratio has found a problem')
raise ValueError
ftrial = - Ttrial + finelast
qfunc = ftrial/finelast
TOLESC = 1.0e-4
if ( (qfunc) >= -TOLESC):
trial_state.flag_ELAST = 1
if (self.properties.FlagHidrDam == 0):
Ddh=0.0e0
wn1=(1.0e0-dn1)
dpn1=dpn
alphan1=alphan
dWdCtr = 0.0e0
for k in range(3):
dWdCtr = dWdCtr + (0.5e0*dWtrj[k,k]/ctr[k]) * Ea[:,:,k]
dWdC = np.matmul( Fpn_inv, np.matmul( dWdCtr, np.transpose(Fpn_inv) ) )
DEV_dWdC = dWdC - (1.0e0/3.0e0)*mr.tensor_inner_product(dWdC, Cn1)* np.linalg.inv(Cn1)
dUdJ, energyv = self.vol_functions(self.properties, J)
stress0 = 2.0e0*(J**(-2.0e0/3.0e0))*DEV_dWdC + J * dUdJ * np.linalg.inv(Cn1)
stress = FG*stress0
# Modified Cauchy Stress - Calculated in 3D Tensorial Format and converted to Voigt notation.
Sn1 = np.matmul(np.matmul(Fn1,stress),np.transpose(Fn1))/J
# saving stress and internal variables to a trial state
trial_state.cauchy_stress = mr.to_voigt(Sn1)
fArrn1, _, _ = self.visco_arrasto(self.properties, alphan1)
trial_state.Fpn = Fpn
trial_state.vin = [wn1, dpn1, dhn1, alphan1, Yn1, 0, 0, 0, 0, 0]
trial_state.dWdCiso = dWdC
trial_state.DEV_dWdCiso = DEV_dWdC
else:
trial_state.flag_ELAST = 0
vin1[7]=qfunc
VARS, dWede = self.return_mapping(etr, Ea, M, J, self.properties, vin, vin1, deltat, 1)
alphan1 = VARS[0]
Ddp = VARS[1]
Ddh = VARS[2]
Yn1 = VARS[3]
delta_alpha = alphan1 - alphan
dhn1=dhn+Ddh
dpn1=dpn+Ddp
dn1=dn+(Ddp+Ddh)
alphaM = delta_alpha * M
expM = self.exp_matrix_sym_3x3(alphaM)
Fpn1 = np.matmul(expM, Fpn)
vdWede[0] = mr.tensor_inner_product(dWede, Ea[:,:,0])
vdWede[1] = mr.tensor_inner_product(dWede, Ea[:,:,1])
vdWede[2] = mr.tensor_inner_product(dWede, Ea[:,:,2])
dWdCtr = 0.0e0
for k in range(3):
dWdCtr = dWdCtr + (0.5e0*vdWede[k]/ctr[k]) * Ea[:,:,k]
dWdC = np.matmul( Fpn_inv, np.matmul(dWdCtr, np.transpose(Fpn_inv)))
DEV_dWdC = dWdC - (1.0e0/3.0e0)*mr.tensor_inner_product(dWdC, Cn1)* np.linalg.inv(Cn1)
wn1=(1.0e0 - dn1)
dUdJ, energyv = self.vol_functions(self.properties, J)
            vin1 = [wn1, dpn1, dhn1, alphan1, Yn1, 0, 0, 0, 0, 0]  # TODO: check that passing a list (instead of an ndarray) causes no problems
FA, FB, FG = self.compute_expressions(self.properties, vin, vin1, deltat)
stress0 = 2.0e0*(J**(-2.0e0/3.0e0))*DEV_dWdC + J * dUdJ * np.linalg.inv(Cn1)
stress = FG*stress0
# Modified Cauchy Stress - Calculated in 3D Tensorial Format and converted to Voigt notation.
            Sn1 = np.matmul(np.matmul(Fn1, stress), np.transpose(Fn1))/J
            # saving stress and internal variables to a trial state
            # (assumption: mirrors the elastic branch above; the original
            # source is truncated at this point)
            trial_state.cauchy_stress = mr.to_voigt(Sn1)
            trial_state.Fpn = Fpn1
            trial_state.vin = vin1
            trial_state.dWdCiso = dWdC
            trial_state.DEV_dWdCiso = DEV_dWdC
        return trial_state
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from gym import spaces
from habitat.core.embodied_task import Measure
from habitat.core.registry import registry
from habitat.core.simulator import Sensor, SensorTypes
from habitat.tasks.nav.nav import PointGoalSensor
from habitat.tasks.rearrange.rearrange_sim import RearrangeSim
from habitat.tasks.rearrange.utils import (
CollisionDetails,
batch_transform_point,
rearrange_logger,
)
from habitat.tasks.utils import cartesian_to_polar, get_angle
class MultiObjSensor(PointGoalSensor):
"""
Abstract parent class for a sensor that specifies the locations of all targets.
"""
def __init__(self, *args, task, **kwargs):
self._task = task
self._sim: RearrangeSim
super(MultiObjSensor, self).__init__(*args, task=task, **kwargs)
def _get_observation_space(self, *args, **kwargs):
n_targets = self._task.get_n_targets()
return spaces.Box(
shape=(n_targets * 3,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
@registry.register_sensor
class TargetCurrentSensor(MultiObjSensor):
"""
This is the ground truth object position sensor relative to the robot end-effector coordinate frame.
"""
cls_uuid: str = "obj_goal_pos_sensor"
def _get_observation_space(self, *args, **kwargs):
return spaces.Box(
shape=(3,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
def get_observation(self, observations, episode, *args, **kwargs):
self._sim: RearrangeSim
T_inv = self._sim.robot.ee_transform.inverted()
idxs, _ = self._sim.get_targets()
scene_pos = self._sim.get_scene_pos()
pos = scene_pos[idxs]
for i in range(pos.shape[0]):
pos[i] = T_inv.transform_point(pos[i])
return pos.reshape(-1)
@registry.register_sensor
class TargetStartSensor(MultiObjSensor):
"""
Relative position from end effector to target object
"""
cls_uuid: str = "obj_start_sensor"
def get_observation(self, *args, observations, episode, **kwargs):
self._sim: RearrangeSim
global_T = self._sim.robot.ee_transform
T_inv = global_T.inverted()
pos = self._sim.get_target_objs_start()
return batch_transform_point(pos, T_inv, np.float32).reshape(-1)
class PositionGpsCompassSensor(Sensor):
def __init__(self, *args, sim, task, **kwargs):
self._task = task
self._sim = sim
super().__init__(*args, task=task, **kwargs)
def _get_sensor_type(self, *args, **kwargs):
return SensorTypes.TENSOR
def _get_observation_space(self, *args, config, **kwargs):
n_targets = self._task.get_n_targets()
self._polar_pos = np.zeros(n_targets * 2, dtype=np.float32)
return spaces.Box(
shape=(n_targets * 2,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
def _get_positions(self) -> np.ndarray:
raise NotImplementedError("Must override _get_positions")
def get_observation(self, task, *args, **kwargs):
pos = self._get_positions()
robot_T = self._sim.robot.base_transformation
rel_pos = batch_transform_point(pos, robot_T.inverted(), np.float32)
for i, rel_obj_pos in enumerate(rel_pos):
rho, phi = cartesian_to_polar(rel_obj_pos[0], rel_obj_pos[1])
self._polar_pos[(i * 2) : (i * 2) + 2] = [rho, -phi]
return self._polar_pos
@registry.register_sensor
class TargetStartGpsCompassSensor(PositionGpsCompassSensor):
cls_uuid: str = "obj_start_gps_compass"
def _get_uuid(self, *args, **kwargs):
return TargetStartGpsCompassSensor.cls_uuid
def _get_positions(self) -> np.ndarray:
return self._sim.get_target_objs_start()
@registry.register_sensor
class TargetGoalGpsCompassSensor(PositionGpsCompassSensor):
cls_uuid: str = "obj_goal_gps_compass"
def _get_uuid(self, *args, **kwargs):
return TargetGoalGpsCompassSensor.cls_uuid
def _get_positions(self) -> np.ndarray:
_, pos = self._sim.get_targets()
return pos
@registry.register_sensor
class AbsTargetStartSensor(MultiObjSensor):
"""
    Absolute start positions of the target objects (not relative to the end effector)
"""
cls_uuid: str = "abs_obj_start_sensor"
def get_observation(self, observations, episode, *args, **kwargs):
pos = self._sim.get_target_objs_start()
return pos.reshape(-1)
@registry.register_sensor
class GoalSensor(MultiObjSensor):
"""
Relative to the end effector
"""
cls_uuid: str = "obj_goal_sensor"
def get_observation(self, observations, episode, *args, **kwargs):
global_T = self._sim.robot.ee_transform
T_inv = global_T.inverted()
_, pos = self._sim.get_targets()
return batch_transform_point(pos, T_inv, np.float32).reshape(-1)
@registry.register_sensor
class AbsGoalSensor(MultiObjSensor):
cls_uuid: str = "abs_obj_goal_sensor"
def get_observation(self, *args, observations, episode, **kwargs):
_, pos = self._sim.get_targets()
return pos.reshape(-1)
@registry.register_sensor
class JointSensor(Sensor):
def __init__(self, sim, config, *args, **kwargs):
super().__init__(config=config)
self._sim = sim
def _get_uuid(self, *args, **kwargs):
return "joint"
def _get_sensor_type(self, *args, **kwargs):
return SensorTypes.TENSOR
def _get_observation_space(self, *args, config, **kwargs):
return spaces.Box(
shape=(config.DIMENSIONALITY,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
def get_observation(self, observations, episode, *args, **kwargs):
joints_pos = self._sim.robot.arm_joint_pos
return np.array(joints_pos, dtype=np.float32)
@registry.register_sensor
class JointVelocitySensor(Sensor):
def __init__(self, sim, config, *args, **kwargs):
super().__init__(config=config)
self._sim = sim
def _get_uuid(self, *args, **kwargs):
return "joint_vel"
def _get_sensor_type(self, *args, **kwargs):
return SensorTypes.TENSOR
def _get_observation_space(self, *args, config, **kwargs):
return spaces.Box(
shape=(config.DIMENSIONALITY,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
def get_observation(self, observations, episode, *args, **kwargs):
joints_pos = self._sim.robot.arm_velocity
return np.array(joints_pos, dtype=np.float32)
@registry.register_sensor
class EEPositionSensor(Sensor):
cls_uuid: str = "ee_pos"
def __init__(self, sim, config, *args, **kwargs):
super().__init__(config=config)
self._sim = sim
@staticmethod
def _get_uuid(*args, **kwargs):
return EEPositionSensor.cls_uuid
def _get_sensor_type(self, *args, **kwargs):
return SensorTypes.TENSOR
def _get_observation_space(self, *args, **kwargs):
return spaces.Box(
shape=(3,),
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
dtype=np.float32,
)
def get_observation(self, observations, episode, *args, **kwargs):
trans = self._sim.robot.base_transformation
ee_pos = self._sim.robot.ee_transform.translation
local_ee_pos = trans.inverted().transform_point(ee_pos)
return np.array(local_ee_pos)
@registry.register_sensor
class RelativeRestingPositionSensor(Sensor):
cls_uuid: str = "relative_resting_position"
def _get_uuid(self, *args, **kwargs):
return RelativeRestingPositionSensor.cls_uuid
def __init__(self, sim, config, *args, **kwargs):
super().__init__(config=config)
self._sim = sim
def _get_sensor_type(self, *args, **kwargs):
return SensorTypes.TENSOR
def _get_observation_space(self, *args, **kwargs):
return spaces.Box(
shape=(3,),
            low=np.finfo(np.float32).min,
            high=np.finfo(np.float32).max,
            dtype=np.float32,
        )
#!/usr/bin/env python
"""
@package ion_functions.qc_functions
@file ion_functions/qc_functions.py
@author <NAME>
@brief Module containing QC functions ported from matlab samples in DPS documents
"""
from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month
import time
import numpy as np
import numexpr as ne
from scipy.interpolate import LinearNDInterpolator
from ion_functions import utils
from ion_functions.utils import fill_value
# try to load the OOI logging module, using default Python logging module if
# unavailable
try:
from ooi.logging import log
except ImportError:
import logging
log = logging.getLogger('ion-functions')
def is_fill(arr):
return np.atleast_1d(arr)[-1] == -9999. # Not the normal fill value, it's hardcoded to the QC params
def is_none(arr):
return arr is None or (np.atleast_1d(arr)[-1] == None)
def dataqc_globalrangetest_minmax(dat, dat_min, dat_max, strict_validation=False):
'''
Python wrapper for dataqc_globalrangetest
Combines the min/max arguments into list for dataqc_globalrangetest
'''
if is_none(dat_min) or is_none(dat_max) or is_fill(dat_min) or is_fill(dat_max):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_globalrangetest(dat, [np.atleast_1d(dat_min)[-1], np.atleast_1d(dat_max)[-1]], strict_validation=strict_validation)
def dataqc_globalrangetest(dat, datlim, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. Returns 1 for presumably good data and 0 for
data presumed bad.
Implemented by:
2010-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance improvements by adding
strict_validation flag.
Usage:
qcflag = dataqc_globalrangetest(dat, datlim, strict_validation)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = Input dataset, any scalar or vector. Must be numeric and real.
datlim = Two-element vector with the minimum and maximum values
considered to be valid.
strict_validation = Flag (default is False) to assert testing of input
types (e.g. isreal, isnumeric)
References:
OOI (2012). Data Product Specification for Global Range Test. Document
Control Number 1341-10004. https://alfresco.oceanobservatories.org
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10004_Data_Product_SPEC_GLBLRNG_OOI.pdf)
"""
dat = np.atleast_1d(dat)
datlim = np.atleast_1d(datlim)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isnumeric(datlim).all():
raise ValueError('\'datlim\' must be numeric')
if not utils.isreal(datlim).all():
raise ValueError('\'datlim\' must be real')
if len(datlim) < 2: # Must have at least 2 elements
raise ValueError('\'datlim\' must have at least 2 elements')
    return ((datlim.min() <= dat) & (dat <= datlim.max())).astype('int8')
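# Illustrative sketch (not part of the original DPS code): exercising the
# global range test on a tiny synthetic vector.
def _example_globalrangetest():
    dat = np.array([1., 5., 12.])
    # values outside the valid interval [0, 10] are flagged 0
    return dataqc_globalrangetest(dat, [0., 10.])  # -> array([1, 1, 0], dtype=int8)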
def dataqc_localrangetest_wrapper(dat, datlim, datlimz, dims, pval_callback):
if is_none(datlim) or np.all(np.atleast_1d(datlim).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
    if is_none(datlimz) or np.all(np.atleast_1d(datlimz).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(dims):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(pval_callback):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
z = []
for dim in dims:
if dim == 'month':
# Convert time vector to vector of months
v = pval_callback('time')
v = np.asanyarray(v, dtype=np.float)
v = ntp_to_month(v)
z.append(v)
else:
# Fetch the dimension from the callback method
v = pval_callback(dim)
z.append(v)
if len(dims)>1:
z = np.column_stack(z)
else:
z = z[0]
datlimz = datlimz[:,0]
return dataqc_localrangetest(dat, z, datlim, datlimz)
def dataqc_localrangetest(dat, z, datlim, datlimz, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. This range is not constant but varies with
measurement location. Returns 1 for presumably good data and 0 for data
presumed bad.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
qcflag = dataqc_localrangetest(dat, z, datlim, datlimz)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric real scalar or column vector.
z = location of measurement dat. must have same # of rows as dat and
same # of columns as datlimz
datlim = two column array with the minimum (column 1) and maximum
(column 2) values considered valid.
datlimz = array with the locations where datlim is given. must have
same # of rows as datlim and same # of columns as z.
References:
OOI (2012). Data Product Specification for Local Range Test. Document
Control Number 1341-10005. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10005_Data_Product_SPEC_LOCLRNG_OOI.pdf)
"""
if strict_validation:
# check if dat and datlim are matrices
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a matrix')
if not utils.ismatrix(datlim):
raise ValueError('\'datlim\' must be a matrix')
# check if all inputs are numeric and real
for k, arg in {'dat': dat, 'z': z, 'datlim': datlim,
'datlimz': datlimz}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
if len(datlim.shape) == 3 and datlim.shape[0] == 1:
datlim = datlim.reshape(datlim.shape[1:])
if len(datlimz.shape) == 3 and datlimz.shape[0] == 1:
datlimz = datlimz.reshape(datlimz.shape[1:])
# test size and shape of the input arrays datlimz and datlim, setting test
# variables.
array_size = datlimz.shape
if len(array_size) == 1:
numlim = array_size[0]
ndim = 1
else:
numlim = array_size[0]
ndim = array_size[1]
array_size = datlim.shape
tmp1 = array_size[0]
tmp2 = array_size[1]
if tmp1 != numlim:
raise ValueError('\'datlim\' and \'datlimz\' must '
'have the same number of rows.')
if tmp2 != 2:
raise ValueError('\'datlim\' must be structured as 2-D array '
'with exactly 2 columns and 1 through N rows.')
# test the size and shape of the z input array
array_size = z.shape
if len(array_size) == 1:
num = array_size[0]
tmp2 = 1
else:
num = array_size[0]
tmp2 = array_size[1]
if tmp2 != ndim:
raise ValueError('\'z\' must have the same number of columns '
'as \'datlimz\'.')
if num != dat.size:
        raise ValueError('Length of \'dat\' must match the number of '
                         'rows in \'z\'')
# test datlim, values in column 2 must be greater than those in column 1
if not all(datlim[:, 1] > datlim[:, 0]):
raise ValueError('Second column values of \'datlim\' should be '
'greater than first column values.')
# calculate the upper and lower limits for the data set
if ndim == 1:
# determine the lower limits using linear interpolation
lim1 = np.interp(z, datlimz, datlim[:, 0], left=np.nan, right=np.nan)
# determine the upper limits using linear interpolation
lim2 = np.interp(z, datlimz, datlim[:, 1], left=np.nan, right=np.nan)
else:
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional lower limits
F = LinearNDInterpolator(datlimz, datlim[:, 0].reshape(numlim, 1))
lim1 = F(z).reshape(dat.size)
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional upper limits
F = LinearNDInterpolator(datlimz, datlim[:, 1].reshape(numlim, 1))
lim2 = F(z).reshape(dat.size)
# replace NaNs from above interpolations
ff = (np.isnan(lim1)) | (np.isnan(lim2))
lim1[ff] = np.max(datlim[:, 1])
lim2[ff] = np.min(datlim[:, 0])
# compute the qcflags
qcflag = (dat >= lim1) & (dat <= lim2)
return qcflag.astype('int8')
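# Illustrative sketch (not part of the original DPS code): a 1-D local range
# test where the valid interval [0, 5] holds at both ends of the z axis.
def _example_localrangetest():
    dat = np.array([3., 9.])
    z = np.array([0., 1.])
    datlim = np.array([[0., 5.], [0., 5.]])
    datlimz = np.array([0., 1.])
    # 9. lies above the interpolated upper limit, so it is flagged 0
    return dataqc_localrangetest(dat, z, datlim, datlimz)  # -> array([1, 0], dtype=int8)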
def dataqc_spiketest_wrapper(dat, acc, N, L, strict_validation=False):
if is_none(acc) or is_fill(acc) or is_none(N) or is_fill(N) or is_none(L) or is_fill(L):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_spiketest(dat, np.atleast_1d(acc)[-1], np.atleast_1d(N)[-1], np.atleast_1d(L)[-1], strict_validation=strict_validation)
def dataqc_spiketest(dat, acc, N=5, L=5, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for spikes.
Returns 1 for presumably good data and 0 for data presumed bad.
    The time series is divided into windows of length L (an odd integer
number). Then, window by window, each value is compared to its (L-1)
neighboring values: a range R of these (L-1) values is computed (max.
minus min.), and replaced with the measurement accuracy ACC if ACC>R. A
value is presumed to be good, i.e. no spike, if it deviates from the
mean of the (L-1) peers by less than a multiple of the range,
N*max(R,ACC).
Further than (L-1)/2 values from the start or end points, the peer
values are symmetrically before and after the test value. Within that
range of the start and end, the peers are the first/last L values
(without the test value itself).
The purpose of ACC is to restrict spike detection to deviations
exceeding a minimum threshold value (N*ACC) even if the data have
little variability. Use ACC=0 to disable this behavior.
Implemented by:
2012-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_spiketest(dat, acc, N, L)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric, real vector.
acc = Accuracy of any input measurement.
N = (optional, defaults to 5) Range multiplier, cf. above
    L = (optional, defaults to 5) Window length, cf. above
References:
OOI (2012). Data Product Specification for Spike Test. Document
Control Number 1341-10006. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10006_Data_Product_SPEC_SPKETST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a vector')
for k, arg in {'acc': acc, 'N': N, 'L': L}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
dat = np.asanyarray(dat, dtype=np.float)
out = spikevalues(dat, L, N, acc)
return out
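# Illustrative sketch (not part of the original DPS code): a lone outlier in
# an otherwise flat series should be flagged 0 by the compiled spike kernel.
def _example_spiketest():
    dat = np.hstack([np.ones(10), [50.], np.ones(10)])
    # the value 50. deviates from its window peers by far more than
    # N*max(R, acc) under the default N=5, L=5
    return dataqc_spiketest(dat, acc=0.1)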
def dataqc_polytrendtest_wrapper(dat, t, ord_n, nstd, strict_validation=False):
    if is_none(ord_n) or is_fill(ord_n) or is_none(nstd) or is_fill(nstd):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_polytrendtest(dat, t, np.atleast_1d(ord_n)[-1], np.atleast_1d(nstd)[-1], strict_validation=strict_validation)
def dataqc_polytrendtest(dat, t, ord_n=1, nstd=3, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements contain a
significant portion of a polynomial. Returns 1 if this is not the case,
else 0.
The purpose of this test is to check if a significant fraction of the
variability in a time series can be explained by a drift, possibly
interpreted as a sensor drift. This drift is assumed to be a polynomial
of order ORD. Use ORD=1 to consider a linear drift
The time series dat is passed to MatLab's POLYFIT routine to obtain a
polynomial fit PP to dat, and the difference dat-PP is compared to the
original dat. If the standard deviation of (dat-PP) is less than that
of dat by a factor of NSTD, the time series is assumed to contain a
significant trend (output will be 0), else not (output will be 1).
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_polytrendtest(dat, t, ord_n, nstd, strict_validation)
where
qcflag = Boolean, 0 a trend is detected, 1 elsewhere.
dat = Input dataset, a numeric real vector.
t = time record associated with dat
ord_n (optional, defaults to 1) = Polynomial order.
nstd (optional, defaults to 3) = Factor by how much the standard
deviation must be reduced before qcflag switches from 1 to 0
strict_validation (optional, defaults to False) = Flag asserting
testing of inputs.
References:
OOI (2012). Data Product Specification for Trend Test. Document
Control Number 1341-10007. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10007_Data_Product_SPEC_TRNDTST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
t = np.atleast_1d(t)
if strict_validation:
for k, arg in {'dat': dat, 't': t, 'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
for k, arg in {'dat': dat, 't': t}.iteritems():
if not utils.isvector(arg):
raise ValueError('\'{0}\' must be a vector'.format(k))
for k, arg in {'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
ord_n = int(round(abs(ord_n)))
nstd = int(abs(nstd))
ll = len(dat)
# Not needed because time is incorporated as 't'
# t = range(ll)
pp = np.polyfit(t, dat, ord_n)
datpp = np.polyval(pp, t)
# test for a trend
if np.atleast_1d((np.std(dat - datpp) * nstd) < np.std(dat)).all():
trndtst = 0
else:
trndtst = 1
    # ensure output size equals input, even though test yields a single value.
qcflag = np.ones(dat.shape).astype('int8') * trndtst
return qcflag
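# Illustrative sketch (not part of the original DPS code): a noise-free linear
# drift is fully explained by the order-1 fit, so every flag comes back 0.
def _example_polytrendtest():
    t = np.arange(10.)
    dat = 2. * t + 1.
    return dataqc_polytrendtest(dat, t)  # -> array of ten zeros (trend detected)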
def dataqc_stuckvaluetest_wrapper(x, reso, num, strict_validation=False):
if is_none(reso) or is_fill(reso) or is_none(num) or is_fill(num):
out = np.empty(x.shape, np.int8)
out.fill(-99)
return out
return dataqc_stuckvaluetest(x, np.atleast_1d(reso)[-1], np.atleast_1d(num)[-1], strict_validation=strict_validation)
def dataqc_stuckvaluetest(x, reso, num=10, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for "stuck
values", i.e. repeated occurences of one value. Returns 1 for
presumably good data and 0 for data presumed bad.
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
        qcflag = dataqc_stuckvaluetest(x, RESO, NUM)
where
qcflag = Boolean output: 0 where stuck values are found, 1 elsewhere.
x = Input time series (vector, numeric).
reso = Resolution; repeat values less than reso apart will be
considered "stuck values".
num = Minimum number of successive values within reso of each other
that will trigger the "stuck value". num is optional and defaults
to 10 if omitted or empty.
References:
OOI (2012). Data Product Specification for Stuck Value Test. Document
Control Number 1341-10008. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10008_Data_Product_SPEC_STUCKVL_OOI.pdf)
"""
dat = np.atleast_1d(x)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'x\' must be numeric')
if not utils.isvector(dat):
raise ValueError('\'x\' must be a vector')
if not utils.isreal(dat).all():
raise ValueError('\'x\' must be real')
for k, arg in {'reso': reso, 'num': num}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
num = np.abs(num)
dat = np.asanyarray(dat, dtype=np.float)
ll = len(x)
if ll < num:
# Warn - 'num' is greater than len(x), returning zeros
out = np.zeros(dat.size, dtype='int8')
else:
out = stuckvalues(dat, reso, num)
return out
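# Illustrative sketch (not part of the original DPS code): twelve identical
# readings exceed the default num=10, so the compiled kernel should flag that
# run as stuck.
def _example_stuckvaluetest():
    x = np.hstack([np.arange(10.), np.full(12, 7.), np.arange(10.)])
    return dataqc_stuckvaluetest(x, reso=0.001)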
def dataqc_gradienttest_wrapper(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
if is_none(ddatdx) or is_fill(ddatdx) or is_none(mindx) or is_fill(mindx) or is_none(startdat) or is_fill(startdat) or is_none(toldat) or is_fill(toldat):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
outqc = dataqc_gradienttest(dat, x, [-np.atleast_1d(ddatdx)[-1], np.atleast_1d(ddatdx)[-1]], np.atleast_1d(mindx)[-1], np.atleast_1d(startdat)[-1], np.atleast_1d(toldat)[-1], strict_validation=strict_validation)
return outqc
def dataqc_gradienttest(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
"""
Description
Data quality control algorithm testing if changes between successive
data points fall within a certain range.
Input data dat are given as a function of coordinate x. The algorithm
will flag dat values as bad if the change deltaDAT/deltaX between
successive dat values exceeds thresholds given in ddatdx. Once the
threshold is exceeded, following dat are considered bad until a dat
value returns to within toldat of the last known good value.
It is possible to remove data points that are too close together in x
coordinates (use mindx).
By default, the first value of dat is considered good. To change this,
use startdat and toldat to set as the first good data point the first
one that comes within toldat of startdat.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
outdat, outx, outqc = dataqc_gradienttest(dat, x, ddatdx, mindx,
startdat, toldat);
where
outdat = same as dat except that NaNs and values not meeting mindx are
removed.
outx = same as x except that NaNs and values not meeting mindx are
removed.
outqc = output quality control flags for outdat. 0 means bad data, 1
means good data.
dat = input dataset, a numeric real vector.
x = coordinate (e.g. time, distance) along which dat is given. Must be
of the same size as dat and strictly increasing.
ddatdx = two-element vector defining the valid range of ddat/dx
from one point to the next.
mindx = scalar. minimum dx for which this test will be applied (data
that are less than mindx apart will be deleted). defaults to zero
if NaN/empty.
startdat = start value (scalar) of dat that is presumed good. defaults
to first non-NaN value of dat if NaN/empty.
toldat = tolerance value (scalar) for dat; threshold to within which
dat must return to be counted as good, after exceeding a ddatdx
threshold detected bad data.
References:
OOI (2012). Data Product Specification for Gradient Test. Document
Control Number 1341-100010.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10010_Data_Product_SPEC_GRDTEST_OOI.pdf)
"""
if strict_validation:
if not utils.isvector(dat) or not utils.isvector(x):
raise ValueError('\'dat\' and \'x\' must be vectors')
if len(dat) != len(x):
            raise ValueError('\'dat\' and \'x\' must be of equal length')
if not all( | np.diff(x) | numpy.diff |
import os
import math
import copy
import numpy as np
import astropy.io.fits as pf
from astropy.time import Time, TimeDelta
from astropy.stats import biweight_midvariance, mad_std
from sklearn.cluster import DBSCAN
from collections import OrderedDict
from utils import (baselines_2_ants, index_of, get_uv_correlations,
find_card_from_header, get_key, to_boolean_array,
check_issubset, convert_an_hdu, convert_fq_hdu,
mask_boolean_with_boolean)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import calendar
months_dict = {v: k for k, v in enumerate(calendar.month_abbr)}
months_dict_inv = {k: v for k, v in enumerate(calendar.month_abbr)}
try:
import pylab
except ImportError:
pylab = None
vec_complex = np.vectorize(complex)
vec_int = np.vectorize(int)
stokes_dict = {-4: 'LR', -3: 'RL', -2: 'LL', -1: 'RR', 1: 'I', 2: 'Q', 3: 'U',
4: 'V'}
def kernel(a, b, amp, scale):
sqdist = np.sum(a**2, 1).reshape(-1, 1) + np.sum(b**2, 1) - 2*np.dot(a, b.T)
return amp**2*np.exp(-0.5 * (1/(scale*scale)) * sqdist)
def gp_pred(amp, scale, v, t):
K_ss = kernel(t.reshape(-1, 1), t.reshape(-1, 1), amp, scale)
L = np.linalg.cholesky(K_ss+1e-6*np.eye(len(t)))
return np.dot(L, v.reshape(-1, 1))[:, 0]
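# Illustrative sketch (not part of the original module): drawing one smooth
# gain curve from the GP prior defined by ``kernel``/``gp_pred``.
def _example_gp_draw():
    t = np.linspace(0., 600., 50)              # ten minutes of time stamps
    v = np.random.normal(0., 1., size=t.size)  # white-noise seed vector
    # hyperparameters mirror the amplitude defaults of antennas_gains()
    return gp_pred(np.exp(-3), np.exp(6), v, t)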
def downscale_uvdata_by_freq(uvdata):
if abs(uvdata.hdu.data[0][0]) > 1:
downscale_by_freq = True
else:
downscale_by_freq = False
return downscale_by_freq
# FIXME: Handling FITS files with only one scan (used for CV)
class UVData(object):
def __init__(self, fname, mode='readonly', verify_option="silentfix"):
self.verify_option = verify_option
self.fname = fname
self.hdulist = pf.open(fname, mode=mode, save_backup=True)
self.hdulist.verify(self.verify_option)
self.hdu = self.hdulist[0]
self._stokes_dict = {'RR': 0, 'LL': 1, 'RL': 2, 'LR': 3}
self.learn_data_structure(self.hdu)
self._uvdata = self.view_uvdata({'COMPLEX': 0}) +\
1j * self.view_uvdata({'COMPLEX': 1})
self._weights = self.view_uvdata({'COMPLEX': 2})
# Numpy boolean arrays with shape of ``UVData.uvdata``.
self._nw_indxs = self._weights <= 0
self._pw_indxs = self._weights > 0
self._scans_bl = None
self._stokes = None
self._times = None
self._frequency = None
self._freq_width = None
self._freq_width_if = None
self._band_center = None
# Dictionary with keys - baselines & values - boolean numpy arrays or
# lists of boolean numpy arrays with indexes of that baseline (or it's
# scans) in ``UVData.uvdata`` array
self._indxs_baselines = dict()
self._indxs_baselines_scans = dict()
# Dictionary with keys - baselines and values - lists of arrays with
# timestamps. Each list - separate scan
self.baselines_scans_times = dict()
# Dictionary with keys - baselines & values - tuples or lists of tuples
# of shapes of part for that baseline (or it's scans) in
# ``UVData.uvdata`` array
self._shapes_baselines = dict()
self._shapes_baselines_scans = dict()
self._get_baselines_info()
self._noise_diffs = None
self._noise_v = None
rec = pf.getdata(self.fname, extname='AIPS AN')
# self._antenna_mapping = {number: rec['ANNAME'][i] for i, number in
# enumerate(self.antennas)}
self._antenna_mapping = {antenna: rec['ANNAME'][antenna-1] for antenna in self.antennas}
self._antennas_baselines = None
self._antennas_times = None
self._minimal_antennas_time = None
self._antennas_gains = None
def _get_baselines_info(self):
"""
Count indexes of visibilities on each single baseline (for single IF &
Stokes) in ``uvdata`` array.
"""
self._indxs_baselines_scans = self.scans_bl
for baseline in self.baselines:
indxs = self._get_baseline_indexes(baseline)
self._indxs_baselines[baseline] = indxs
self._shapes_baselines[baseline] = np.shape(self.uvdata[indxs])
self._shapes_baselines_scans[baseline] = list()
try:
for scan_indxs in self._indxs_baselines_scans[baseline]:
bl_scan_data = self.uvdata[scan_indxs]
self._shapes_baselines_scans[baseline].append(np.shape(bl_scan_data))
except TypeError:
pass
def nw_indxs_baseline(self, baseline, average_bands=False, stokes=None,
average_stokes=False):
"""
Shortcut to negative or zero weights visibilities on given baseline.
:param baseline:
Integer baseline number.
:param average_bands: (optional)
        Average over bands such that if any band of the current
        visibility/stokes has negative weight, then the whole
        visibility/stokes is treated as negatively weighted.
        (default: ``False``)
:param stokes: (optional)
Stokes parameters of ``self`` that output or use for calculation of
frequency averaged values.
:param average_stokes: (optional)
        Average over the Stokes parameters chosen in the ``stokes`` keyword
        argument (or all those present in the data) such that if any stokes
        of the current visibility has negative weight, then the whole
        visibility is treated as negatively weighted. (default: ``False``)
:return:
Numpy boolean array with shape of ``(#vis, #bands, #stokes)`` or
``(#vis, #stokes)``, where #vis - number of visibilities for given
baseline & #stokes - number of stokes parameters in ``self`` or
``len(stokes)`` in ``stokes`` is not ``None``. (default: ``None``)
"""
result = self._nw_indxs[self._indxs_baselines[baseline]]
stokes_indxs = list()
if stokes is not None:
for stoke in stokes:
assert stoke in self.stokes
stokes_indxs.append(self.stokes_dict_inv[stoke])
result = result[:, :, stokes_indxs]
if average_bands:
result = np.asarray(~result, dtype=int)
result = np.prod(result, axis=1)
result = np.asarray(result, dtype=bool)
result = ~result
if average_stokes and not average_bands:
result = np.asarray(~result, dtype=int)
result = np.prod(result, axis=2)
result = np.asarray(result, dtype=bool)
result = ~result
if average_stokes and average_bands:
result = np.asarray(~result, dtype=int)
result = np.prod(result, axis=1)
result = np.asarray(result, dtype=bool)
result = ~result
return result
def pw_indxs_baseline(self, baseline, average_bands=False, stokes=None,
average_stokes=False):
"""
Shortcut to positive weights visibilities on given baseline.
:param baseline:
Integer baseline number.
:return:
Numpy boolean array with shape of ``(#vis, #bands, #stokes)``, where
#vis - number of visibilities for given baseline.
"""
return ~self.nw_indxs_baseline(baseline, average_bands=average_bands,
stokes=stokes,
average_stokes=average_stokes)
def _check_stokes_present(self, stokes):
"""
Check if ``stokes`` is present in data (could be calculated from data).
:param stokes:
String of Stokes parameters ("I, Q, U, V, RR, LL, RL, LR").
:return:
Boolean value.
"""
stokes_present = self.stokes
if stokes in stokes_present:
return True
elif stokes in ("I", "Q", "U", "V"):
if stokes in ("I", "V"):
return "RR" in stokes_present and "LL" in stokes_present
# If "Q" or "U"
else:
return "RL" in stokes_present and "LR" in stokes_present
elif stokes in ("RR", "LL", "RL", "LR"):
return stokes in stokes_present
else:
raise Exception("stokes must be from I, Q, U, V, RR, LL, RL or LR!")
def sync(self):
"""
        Sync the internal real representation with the complex representation
        ``self._uvdata``. Needed because a complex view onto the underlying
        real numpy.ndarray is not available.
"""
slices_dict = self.slices_dict.copy()
slices_dict.update({'COMPLEX': 0})
self.hdu.data.data[list(slices_dict.values())] = self.uvdata.real
slices_dict.update({'COMPLEX': 1})
self.hdu.data.data[list(slices_dict.values())] = self.uvdata.imag
def scale_uvw(self, scale):
suffix = '--'
try:
self.hdu.columns[self.par_dict['UU{}'.format(suffix)]].array /= scale
self.hdu.columns[self.par_dict['VV{}'.format(suffix)]].array /= scale
self.hdu.columns[self.par_dict['WW{}'.format(suffix)]].array /= scale
print("Dividing uvw on {}".format(scale))
except KeyError:
try:
suffix = '---SIN'
self.hdu.columns[self.par_dict['UU{}'.format(suffix)]].array /= scale
self.hdu.columns[self.par_dict['VV{}'.format(suffix)]].array /= scale
self.hdu.columns[self.par_dict['WW{}'.format(suffix)]].array /= scale
print("Dividing uvw on {}".format(scale))
except KeyError:
suffix = ''
self.hdu.columns[self.par_dict['UU{}'.format(suffix)]].array /= scale
self.hdu.columns[self.par_dict['VV{}'.format(suffix)]].array /= scale
self.hdu.columns[self.par_dict['WW{}'.format(suffix)]].array /= scale
print("Dividing uvw on {}".format(scale))
def save(self, fname=None, data=None, rewrite=False,
downscale_by_freq=False):
"""
Save uv-data to FITS-file.
:param data: (optional)
Numpy record array with uv-data & parameters info. If ``None`` then
save current instance's uv-data. (default: ``None``)
:param fname: (optional)
Name of FITS-file to save. If ``None`` then use current instance's
original file. (default: ``None``)
:param rewrite: (optional)
Boolean - rewrite file with original name if any? (default:
``False``)
"""
fname = fname or self.fname
if os.path.exists(fname) and rewrite:
os.unlink(fname)
if data is None:
if downscale_by_freq:
self._downscale_uvw_by_frequency()
self.hdulist.writeto(fname, output_verify=self.verify_option)
else:
# datas = np.array(sorted(data, key=lambda x: x['DATE']+x['_DATE']),
# dtype=data.dtype)
new_hdu = pf.GroupsHDU(data)
# PyFits updates header using given data (``GCOUNT`` key) anyway
new_hdu.header = self.hdu.header
hdulist = pf.HDUList([new_hdu])
for hdu in self.hdulist[1:]:
if hdu.header['EXTNAME'] == 'AIPS AN':
# FIXME:
try:
hdu = convert_an_hdu(hdu, new_hdu)
except IndexError:
print("You should fix that issue!")
pass
if hdu.header['EXTNAME'] == 'AIPS FQ':
hdu = convert_fq_hdu(hdu)
hdulist.append(hdu)
# FIXME: Sometimes i need this to be commented
if downscale_by_freq:
self._downscale_uvw_by_frequency()
hdulist.writeto(fname, output_verify=self.verify_option)
def save_fraction(self, fname, frac, random_state=0):
"""
        Save only a fraction of the data on each baseline.
:param fname:
File path to save.
:param frac:
Float (0., 1.). Fraction of points from each baseline to save.
"""
from sklearn.model_selection import ShuffleSplit
ss = ShuffleSplit(n_splits=1, test_size=1-frac,
random_state=random_state)
indxs = list()
for bl in self.baselines:
bl_indxs = self._indxs_baselines[bl]
print("Baseline {} has {} samples".format(bl,
np.count_nonzero(bl_indxs)))
bl_indxs_pw = self.pw_indxs_baseline(bl, average_bands=True,
stokes=['RR', 'LL'],
average_stokes=True)
bl_indxs = mask_boolean_with_boolean(bl_indxs, bl_indxs_pw)
for train, test in ss.split(np.nonzero(bl_indxs)[0]):
# tr = to_boolean_array(np.nonzero(bl_indxs)[0][train],
# len(bl_indxs))
tr = np.nonzero(bl_indxs)[0][train]
indxs.append(tr)
indxs = np.hstack(indxs)
indxs = sorted(indxs)
data = self.hdu.data[indxs]
self.save(fname, data, rewrite=True)
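    # Illustrative usage (hypothetical file name): keep a random half of the
    # points on every baseline, e.g. for building cross-validation splits:
    #     uv.save_fraction('half.fits', frac=0.5)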
def save_uvrange(self, fname, uv_min):
"""
        Save only data whose uv-radius exceeds ``uv_min`` on each baseline.
        :param fname:
            File path to save.
        :param uv_min:
            Minimum uv-radius [wavelengths] of the points to keep.
"""
indxs = list()
for bl in self.baselines:
bl_indxs = self._indxs_baselines[bl]
bl_indxs_pw = self.pw_indxs_baseline(bl, average_bands=True,
stokes=['RR', 'LL'],
average_stokes=True)
bl_indxs = mask_boolean_with_boolean(bl_indxs, bl_indxs_pw)
uv = self.uv[np.nonzero(bl_indxs)[0]]
uv_rad = np.hypot(uv[:, 0], uv[:, 1])
tr = np.nonzero(bl_indxs)[0][uv_rad > uv_min]
indxs.append(tr)
indxs = np.hstack(indxs)
indxs = sorted(indxs)
data = self.hdu.data[indxs]
self.save(fname, data, rewrite=True)
# TODO: for IDI extend this method
def learn_data_structure(self, hdu):
# Learn parameters
par_dict = OrderedDict()
for i, par in enumerate(hdu.data.names):
par_dict.update({par: i})
self.par_dict = par_dict
# Create mapping of FITS CTYPEi ``i`` number to dimensions of PyFits
# hdu.data[`DATA`] (hdu.data.data) numpy.ndarray.
data_dict = OrderedDict()
data_dict.update({'GROUP': (0, hdu.header['GCOUNT'])})
for i in range(hdu.header['NAXIS'], 1, -1):
data_dict.update({hdu.header['CTYPE' + str(i)]:
(hdu.header['NAXIS'] - i + 1,
hdu.header['NAXIS' + str(i)])})
# Save shape and dimensions of data recarray
self.data_dict = data_dict
self.nif = data_dict['IF'][1]
self.nstokes = data_dict['STOKES'][1]
# Create dictionary with necessary slices
slices_dict = OrderedDict()
for key, value in data_dict.items():
# FIXME: Generally we should avoid removing dims
if value[1] == 1 and key not in ['IF', 'STOKES']:
slices_dict.update({key: 0})
else:
slices_dict.update({key: slice(None, None)})
self.slices_dict = slices_dict
uvdata_slices_dict = OrderedDict()
for key, value in slices_dict.items():
if value != 0:
uvdata_slices_dict.update({key: value})
self.uvdata_slices_dict = uvdata_slices_dict
def new_slices(self, key, key_slice):
"""
Return VIEW of internal ``hdu.data.data`` numpy.ndarray with given
slice.
"""
slices_dict = self.slices_dict.copy()
slices_dict.update({key: key_slice})
return slices_dict
def view_uvdata(self, new_slices_dict):
"""
Return VIEW of internal ``hdu.data.data`` numpy.ndarray with given
slices.
:param new_slices_dict:
Ex. {'COMPLEX': slice(0, 1), 'IF': slice(0, 2)}
"""
slices_dict = self.slices_dict.copy()
for key, key_slice in new_slices_dict.items():
slices_dict.update({key: key_slice})
return self.hdu.data.data[list(slices_dict.values())]
@property
def stokes(self):
"""
Shortcut to correlations present (or Stokes parameters).
"""
if self._stokes is None:
ref_val = get_key(self.hdu.header, 'STOKES', 'CRVAL')
ref_pix = get_key(self.hdu.header, 'STOKES', 'CRPIX')
delta = get_key(self.hdu.header, 'STOKES', 'CDELT')
n_stokes = get_key(self.hdu.header, 'STOKES', 'NAXIS')
self._stokes = [stokes_dict[ref_val + (i - ref_pix) * delta] for i
in range(1, n_stokes + 1)]
return self._stokes
@property
def ra(self):
"""
:return:
Right Ascension of the observed source [deg]
"""
return get_key(self.hdu.header, 'RA', 'CRVAL')
@property
def dec(self):
"""
:return:
Declination of the observed source [deg]
"""
return get_key(self.hdu.header, 'DEC', 'CRVAL')
@property
def stokes_dict(self):
return {i: stokes for i, stokes in enumerate(self.stokes)}
@property
def stokes_dict_inv(self):
return {stokes: i for i, stokes in enumerate(self.stokes)}
@property
def uvdata(self):
"""
Returns (#groups, #if, #stokes,) complex numpy.ndarray with last
dimension - real&imag part of visibilities. It is A COPY of
``hdu.data.data`` numpy.ndarray.
"""
# Always return complex representation of internal ``hdu.data.data``
return self._uvdata
@uvdata.setter
def uvdata(self, other):
# Updates A COPY of ``hdu.data.data`` numpy.ndarray (complex repr.)
self._uvdata = other
# Sync internal representation with changed complex representation.
self.sync()
@property
def weights(self):
"""
        Returns (#groups, #if, #stokes,) real numpy.ndarray of visibility
        weights. It is A VIEW of the ``hdu.data.data`` numpy.ndarray.
"""
return self._weights
@property
def uvdata_weight_masked(self):
return np.ma.array(self.uvdata, mask=self._nw_indxs)
@property
def uvdata_freq_averaged(self):
"""
Returns ``self.uvdata`` averaged in IFs, that is complex numpy.ndarray
with shape (#N, #stokes).
"""
if self.nif > 1:
result = np.ma.mean(self.uvdata_weight_masked, axis=1)
# FIXME: if self.nif=1 then np.mean for axis=1 will remove this
# dimension. So don't need this if-else
else:
result = self.uvdata_weight_masked[:, 0, :]
return result
@property
def weights_nw_masked(self):
"""
        Returns (#groups, #if, #stokes,) masked numpy.ndarray of visibility
        weights, with non-positive weights masked.
"""
return np.ma.array(self._weights, mask=self._nw_indxs)
@property
def errors_from_weights(self):
"""
        Returns (#groups, #if, #stokes,) masked numpy.ndarray of visibility
        errors estimated as 1/sqrt(weight), with non-positive weights masked.
"""
return 1. / np.sqrt(self.weights_nw_masked)
@property
def errors_from_weights_masked_freq_averaged(self):
if self.nif > 1:
result = np.ma.mean(self.errors_from_weights, axis=1)/np.sqrt(self.nif)
else:
result = self.errors_from_weights[:, 0, :]
return result
@property
def baselines(self):
"""
Returns list of baselines numbers.
"""
result = list(set(self.hdu.data['BASELINE']))
return sorted(result)
@property
def antennas(self):
"""
Returns list of antennas numbers.
"""
return baselines_2_ants(self.baselines)
@property
def antenna_mapping(self):
"""
:return:
Dictionary with keys - antenna numbers and values - antenna names.
"""
return self._antenna_mapping
@property
def inverse_antenna_mapping(self):
"""
:return:
Dictionary with keys - antenna names and values - antenna numbers.
"""
return {v: k for k, v in self._antenna_mapping.items()}
@property
def frequency(self):
"""
Returns sky frequency in Hz.
"""
if self._frequency is None:
freq_card = find_card_from_header(self.hdu.header, value='FREQ')[0]
self._frequency = self.hdu.header['CRVAL{}'.format(freq_card[0][-1])]
return self._frequency
@property
def freq_width_if(self):
"""
Returns width of IF in Hz.
"""
if self._freq_width_if is None:
freq_card = find_card_from_header(self.hdu.header, value='FREQ')[0]
self._freq_width_if = self.hdu.header['CDELT{}'.format(freq_card[0][-1])]
return self._freq_width_if
@property
def freq_width(self):
"""
Returns width of all IFs in Hz.
"""
if self._freq_width is None:
freq_card = find_card_from_header(self.hdu.header, value='FREQ')[0]
self._freq_width = self.nif * self.hdu.header['CDELT{}'.format(freq_card[0][-1])]
return self._freq_width
@property
def band_center(self):
"""
Returns center of frequency bandwidth in Hz.
"""
if self._band_center is None:
self._band_center = self.frequency + self.freq_width_if * (self.nif / 2. - 0.5)
return self._band_center
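    # Example (illustrative numbers): with 8 IFs of width 8 MHz and an edge
    # frequency of 15.352 GHz, band_center = 15.352e9 + 8e6*(8/2. - 0.5)
    # = 15.380e9 Hz.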
@property
def times(self):
"""
Returns array of ``astropy.time.Time`` instances.
"""
if self._times is None:
self._times = Time(self.hdu.data['DATE'] + self.hdu.data['_DATE'],
format='jd')
return self._times
@property
def antennas_baselines(self):
if self._antennas_baselines is None:
self._antennas_baselines = dict()
for antenna in self.antennas:
self._antennas_baselines.update({antenna: list()})
for baseline in self.baselines:
ant1, ant2 = baselines_2_ants([baseline])
if ant1 == antenna or ant2 == antenna:
self._antennas_baselines[antenna].append(baseline)
return self._antennas_baselines
@property
def antennas_times(self):
if self._antennas_times is None:
self._antennas_times = dict()
for antenna in self.antennas:
self._antennas_times.update({antenna: list()})
for baseline in self.antennas_baselines[antenna]:
# Find times of current baseline
indexes = self._get_baseline_indexes(baseline)
bl_times = self.times[indexes]
self._antennas_times[antenna].extend(list(bl_times))
ordered_times = sorted(set(self._antennas_times[antenna]))
self._antennas_times[antenna] = ordered_times
return self._antennas_times
@property
def minimal_antennas_time(self):
if self._minimal_antennas_time is None:
minimal_times = [np.min(self.antennas_times[ant]) for ant in self.antennas]
self._minimal_antennas_time = np.min(minimal_times)
return self._minimal_antennas_time
def antennas_gains(self, amp_gpamp=np.exp(-3), amp_gpphase=np.exp(-3), scale_gpamp=np.exp(6),
scale_gpphase=np.exp(5), rewrite=False):
if self._antennas_gains is None or rewrite:
t_min = self.minimal_antennas_time
# For each antenna - create GP of amp and phase
self._antennas_gains = dict()
for ant in self.antennas:
self._antennas_gains[ant] = dict()
ant_time = self.antennas_times[ant]
tdeltas = [t - t_min for t in ant_time]
tdeltas = [dt.sec for dt in tdeltas]
for pol in ("r", "l"):
# Amplitude
v = np.random.normal(0, 1, size=len(tdeltas))
amp = 1 + gp_pred(amp_gpamp, scale_gpamp, v, np.array(tdeltas))
# Phase
v = np.random.normal(0, 1, size=len(tdeltas))
phase = gp_pred(amp_gpphase, scale_gpphase, v, np.array(tdeltas))
self._antennas_gains[ant][pol] = {"amp": {t: a for (t, a) in zip(ant_time, amp)},
"phase": {t: p for (t, p) in zip(ant_time, phase)}}
return self._antennas_gains
def plot_antennas_gains(self):
color_dict = {"r": "#1f77b4", "l": "#ff7f0e"}
antennas_gains = self.antennas_gains()
fig, axes = plt.subplots(len(antennas_gains), 2, sharex=True, figsize=(24, 20))
t_min = self.minimal_antennas_time
for i, ant in enumerate(antennas_gains):
ant_time = self.antennas_times[ant]
tdeltas = [t - t_min for t in ant_time]
tdeltas = [dt.sec for dt in tdeltas]
for pol in ("r", "l"):
amp = antennas_gains[ant][pol]["amp"]
phase = antennas_gains[ant][pol]["phase"]
if i == 0:
label = pol
else:
label = None
dots, = axes[i, 0].plot(tdeltas, list(amp.values()), '.', color=color_dict[pol])
if label is not None:
dots.set_label(label.upper())
axes[i, 0].legend(loc="upper right")
axes[i, 1].plot(tdeltas, list(phase.values()), '.', color=color_dict[pol])
axes[i, 1].yaxis.set_ticks_position("right")
axes[0, 0].set_title("Amplitudes")
axes[0, 1].set_title("Phases")
axes[i, 0].set_xlabel("time, s")
axes[i, 1].set_xlabel("time, s")
# if savefn:
# fig.savefig(savefn, bbox_inches="tight", dpi=300)
fig.show()
return fig
@property
def ngroups(self):
return self.hdu.header["GCOUNT"]
def inject_gains(self):
from cmath import exp
gains = self.antennas_gains()
baselines = self.hdu.data["BASELINE"]
for i in range(self.ngroups):
t = self.times[i]
baseline = baselines[i]
ant1, ant2 = baselines_2_ants([baseline])
amp1r = gains[ant1]["r"]["amp"][t]
amp1l = gains[ant1]["l"]["amp"][t]
amp2r = gains[ant2]["r"]["amp"][t]
amp2l = gains[ant2]["l"]["amp"][t]
phase1r = gains[ant1]["r"]["phase"][t]
phase1l = gains[ant1]["l"]["phase"][t]
phase2r = gains[ant2]["r"]["phase"][t]
phase2l = gains[ant2]["l"]["phase"][t]
gain1r = amp1r*exp(1j*phase1r)
gain1l = amp1l*exp(1j*phase1l)
gain2r = amp2r*exp(1j*phase2r)
gain2l = amp2l*exp(1j*phase2l)
# vis_real_new = amp1 * amp2 * (np.cos(phase1 - phase2) * vis_real - np.sin(phase1 - phase2) * vis_imag)
# vis_imag_new = amp1 * amp2 * (np.cos(phase1 - phase2) * vis_imag + np.sin(phase1 - phase2) * vis_real)
# vis_real_gained.append(vis_real_new)
# vis_imag_gained.append(vis_imag_new)
# Stokes 0 (RR)
if self._check_stokes_present("RR"):
self.uvdata[i, :, 0] = gain1r * gain2r.conjugate() * self.uvdata[i, :, 0]
if self._check_stokes_present("LL"):
self.uvdata[i, :, 1] = gain1l * gain2l.conjugate() * self.uvdata[i, :, 1]
if self._check_stokes_present("RL"):
self.uvdata[i, :, 2] = gain1r * gain2l.conjugate() * self.uvdata[i, :, 2]
if self._check_stokes_present("LR"):
self.uvdata[i, :, 3] = gain1l * gain2r.conjugate() * self.uvdata[i, :, 3]
self.sync()
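    # Illustrative usage (hypothetical file names): corrupt a dataset with the
    # simulated GP gains and write the result out:
    #     uv = UVData('template.fits')
    #     uv.inject_gains()
    #     uv.save('template_gained.fits', rewrite=True)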
def n_usable_visibilities_difmap(self, stokes="I", freq_average=False):
"""
Returns number of visibilities usable for fitting ``stokes``. To get
#DOF on has to double it (Re & Im parts) and subtract number of model
parameters.
:param stokes:
String of Stokes parameter or correlation.
:note:
For nonlinear models #DOF is actually a more complicated thing.
"""
self._check_stokes_present(stokes)
# (#, n_IF, 1) if not freq_average
stokes_vis = self._choose_uvdata(stokes=stokes,
freq_average=freq_average)
# Number of masked visibilities
n_bad = np.count_nonzero(stokes_vis.mask)
shape = stokes_vis.shape
if freq_average:
factor = 1.0
else:
factor = shape[1]
return shape[0]*factor - n_bad
def dof(self, model):
"""
Number of the Degrees Of Freedom given model.
:param model:
Instance of ``Model`` class. Should have ``stokes`` and ``size``
attributes.
:return:
Value of DOF.
:note:
For nonlinear models DOF != number of parameters in a model.
"""
return 2*self.n_usable_visibilities_difmap(stokes=model.stokes) - model.size
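    # Example (illustrative numbers): a 6-parameter model fitted against 1000
    # usable visibilities gives dof = 2*1000 - 6 = 1994.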
# TODO: Optionally use Q & U models for adding D-terms
def add_D(self, d_dict, imodel=None, qmodel=None, umodel=None):
"""
Add D-terms contribution (in linear approximation) to data.
See http://adsabs.harvard.edu/abs/1994ApJ...427..718R equations (A1) &
(A2)
:param d_dict:
Dictionary with keys [antenna name][integer of IF]["R"/"L"]
:param imodel: (optional)
Instance of ``Model`` class for Stokes I. If not ``None`` then use
FT of this model as Stokes I visibilities that are subject to
D-terms. (default: ``None``)
:param qmodel: (optional)
Instance of ``Model`` class for Stokes Q. If not ``None`` then use
FT of this model with those for ``U`` as a subject to
D-terms. (default: ``None``)
:param umodel: (optional)
Instance of ``Model`` class for Stokes U. If not ``None`` then use
            FT of this model with those for ``Q`` as a subject to
D-terms. (default: ``None``)
"""
from PA import PA
from utils import GRT_coordinates
if qmodel is not None:
if umodel is None:
raise Exception("Need both Q&U models!")
if umodel is not None:
if qmodel is None:
raise Exception("Need both Q&U models!")
if imodel is not None or qmodel is not None:
models = list()
if imodel is not None:
models.append(imodel)
if qmodel is not None:
models.extend([qmodel, umodel])
uvdata_copy = copy.deepcopy(self)
uvdata_copy.substitute(models)
else:
uvdata_copy = self
for baseline in self.baselines:
bl_indx = self._get_baseline_indexes(baseline)
JD = self.times.jd[bl_indx]
ant1, ant2 = baselines_2_ants([baseline])
antname1 = self.antenna_mapping[ant1]
antname2 = self.antenna_mapping[ant2]
latitude1, longitude1 = GRT_coordinates[antname1]
latitude2, longitude2 = GRT_coordinates[antname2]
for band in range(self.nif):
d1R = d_dict[antname1][band]["R"]
d1L = d_dict[antname1][band]["L"]
d2R = d_dict[antname2][band]["R"]
d2L = d_dict[antname2][band]["L"]
I = 0.5*(uvdata_copy.uvdata[bl_indx, band, self.stokes_dict_inv["RR"]] +
uvdata_copy.uvdata[bl_indx, band, self.stokes_dict_inv["LL"]])
pa1 = PA(JD, self.ra, self.dec, latitude1, longitude1)
pa2 = PA(JD, self.ra, self.dec, latitude2, longitude2)
# D_{1,R}*exp(+2i*fi_1)*I_{1,2} + D^*_{2,L}*exp(+2i*fi_2)*I_{1,2} --- for RL
# D_{1,L}*exp(-2i*fi_1)*I_{1,2} + D^*_{2,R}*exp(-2i*fi_2)*I_{1,2} --- for LR
# add_RL = I*(d1R*np.exp(2j*pa1) + d2L.conj()*np.exp(2j*pa2))
# add_LR = I*(d1L*np.exp(-2j*pa1) + d2R.conj()*np.exp(-2j*pa2))
add_RL = I*(d1R*np.exp(1j*(pa1-pa2)) + d2L.conj()*np.exp(1j*(-pa1+pa2)))
add_LR = I*(d1L*np.exp(1j*(pa1+pa2)) + d2R.conj()*np.exp(1j*(pa1-pa2)))
self.uvdata[bl_indx, band, self.stokes_dict_inv["RL"]] =\
self.uvdata[bl_indx, band, self.stokes_dict_inv["RL"]] + add_RL
self.uvdata[bl_indx, band, self.stokes_dict_inv["LR"]] =\
self.uvdata[bl_indx, band, self.stokes_dict_inv["LR"]] + add_LR
self.sync()
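    # Illustrative usage (hypothetical antenna name and leakage values); the
    # expected ``d_dict`` layout is [antenna name][IF number]["R"/"L"]:
    #     d_dict = {"EF": {0: {"R": 0.01 + 0.01j, "L": 0.02 - 0.005j}}}
    #     uv.add_D(d_dict)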
@property
def scans(self):
"""
        Returns the times that separate different scans. If an AIPS NX table
        is present in the original FITS file it is used; otherwise ``None``
        is returned.
:return:
numpy.ndarray with shape (#scans, 2,) with start & stop time for each
of #scans scans.
"""
try:
indx = self.hdulist.index_of('AIPS NX')
print("Found AIPS NX table!")
except KeyError:
indx = None
print("No AIPS NX table are found!")
if indx is not None:
nx_hdu = self.hdulist[indx]
scans = (np.vstack((nx_hdu.data['TIME'], nx_hdu.data['TIME'] +
nx_hdu.data['TIME INTERVAL']))).T
else:
scans = None
return scans
# FIXME: doesn't work for ``J0005+3820_X_1998_06_24_fey_vis.fits``
# FIXME: Sometimes only 1 measurement in `scan`. It results in noise =
# ``nan`` for that scan
# FIXME: It would be better to output indexes of different scans for each
# baselines
@property
def __scans_bl(self):
"""
Calculate scans for each baseline separately.
        It won't coincide with UVData.scans because different baselines have
        different numbers of scans.
:return:
Dictionary with scans borders for each baseline.
"""
scans_dict = dict()
all_times = self.hdu.columns[self.par_dict['DATE']].array
all_a, all_b = np.histogram(all_times[1:] - all_times[:-1])
for bl in self.baselines:
# print "Processing baseline ", bl
bl_indxs = self._choose_uvdata(baselines=bl)[1]
bl_times = self.hdu.columns[self.par_dict['DATE']].array[bl_indxs]
a, b = np.histogram(bl_times[1:] - bl_times[:-1])
# If baseline consists only of 1 scan
if b[-1] < all_b[1]:
scans_dict.update({bl: np.atleast_2d([bl_times[0],
bl_times[-1]])})
# If baseline has > 1 scan
else:
scan_borders = bl_times[(np.where((bl_times[1:] -
bl_times[:-1]) > b[1])[0])]
scans_list = [[bl_times[0], scan_borders[0]]]
for i in range(len(scan_borders) - 1):
scans_list.append([float(bl_times[np.where(bl_times == scan_borders[i])[0] + 1]),
scan_borders[i + 1]])
scans_list.append([float(bl_times[np.where(bl_times == scan_borders[i + 1])[0] + 1]),
bl_times[-1]])
scans_dict.update({bl: np.asarray(scans_list)})
return scans_dict
@property
def scans_bl(self):
if self._scans_bl is None:
scans_dict = dict()
for bl in self.baselines:
self.baselines_scans_times[bl] = list()
bl_scans = list()
bl_indxs = self._get_baseline_indexes(bl)
# JD-formatted times for current baseline
bl_times = self.hdu.data['DATE'][bl_indxs] +\
self.hdu.data['_DATE'][bl_indxs]
bl_times = bl_times.reshape((bl_times.size, 1))
db = DBSCAN(eps=TimeDelta(120., format='sec').jd, min_samples=3,
leaf_size=5).fit(bl_times)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Make start/stop for each scan
bl_times = bl_times[:, 0]
for label in set(labels):
scan_times = bl_times[np.where(labels == label)]
self.baselines_scans_times[bl].append(scan_times)
if -1 in set(labels):
ant1, ant2 = baselines_2_ants([bl])
print("Non-typical scan structure for baseline {}, antennas {}-{}".format(bl, ant1, ant2))
scans_dict[bl] = None
else:
bl_indxs_ = np.array(bl_indxs, dtype=int)
bl_indxs_[bl_indxs] = labels + 1
for i in set(labels):
bl_scans.append(bl_indxs_ == i + 1)
scans_dict[bl] = bl_scans
self._scans_bl = scans_dict
return self._scans_bl
def _downscale_uvw_by_frequency(self):
suffix = '--'
try:
u = self.hdu.columns[self.par_dict['UU{}'.format(suffix)]].array
v = self.hdu.columns[self.par_dict['VV{}'.format(suffix)]].array
w = self.hdu.columns[self.par_dict['WW{}'.format(suffix)]].array
except KeyError:
try:
suffix = '---SIN'
u = self.hdu.columns[self.par_dict['UU{}'.format(suffix)]].array
v = self.hdu.columns[self.par_dict['VV{}'.format(suffix)]].array
w = self.hdu.columns[self.par_dict['WW{}'.format(suffix)]].array
except KeyError:
suffix = ''
u = self.hdu.columns[self.par_dict['UU{}'.format(suffix)]].array
v = self.hdu.columns[self.par_dict['VV{}'.format(suffix)]].array
w = self.hdu.columns[self.par_dict['WW{}'.format(suffix)]].array
if abs(np.mean(u)) > 1.:
self.hdu.columns[self.par_dict['UU{}'.format(suffix)]].array /= self.frequency
self.hdu.columns[self.par_dict['VV{}'.format(suffix)]].array /= self.frequency
self.hdu.columns[self.par_dict['WW{}'.format(suffix)]].array /= self.frequency
def _upscale_uvw_by_frequency(self):
suffix = '--'
try:
u = self.hdu.columns[self.par_dict['UU{}'.format(suffix)]].array
v = self.hdu.columns[self.par_dict['VV{}'.format(suffix)]].array
w = self.hdu.columns[self.par_dict['WW{}'.format(suffix)]].array
except KeyError:
try:
suffix = '---SIN'
u = self.hdu.columns[self.par_dict['UU{}'.format(suffix)]].array
v = self.hdu.columns[self.par_dict['VV{}'.format(suffix)]].array
w = self.hdu.columns[self.par_dict['WW{}'.format(suffix)]].array
except KeyError:
suffix = ''
u = self.hdu.columns[self.par_dict['UU{}'.format(suffix)]].array
v = self.hdu.columns[self.par_dict['VV{}'.format(suffix)]].array
w = self.hdu.columns[self.par_dict['WW{}'.format(suffix)]].array
if abs(np.mean(u)) < 1.:
self.hdu.columns[self.par_dict['UU{}'.format(suffix)]].array *= self.frequency
self.hdu.columns[self.par_dict['VV{}'.format(suffix)]].array *= self.frequency
self.hdu.columns[self.par_dict['WW{}'.format(suffix)]].array *= self.frequency
@property
def uvw(self):
"""
Shortcut for all (u, v, w)-elements of self.
:return:
Numpy.ndarray with shape (N, 3,), where N is the number of (u, v, w)
points.
"""
suffix = '--'
try:
u = self.hdu.columns[self.par_dict['UU{}'.format(suffix)]].array
v = self.hdu.columns[self.par_dict['VV{}'.format(suffix)]].array
w = self.hdu.columns[self.par_dict['WW{}'.format(suffix)]].array
except KeyError:
try:
suffix = '---SIN'
u = self.hdu.columns[self.par_dict['UU{}'.format(suffix)]].array
v = self.hdu.columns[self.par_dict['VV{}'.format(suffix)]].array
w = self.hdu.columns[self.par_dict['WW{}'.format(suffix)]].array
except KeyError:
suffix = ''
u = self.hdu.columns[self.par_dict['UU{}'.format(suffix)]].array
v = self.hdu.columns[self.par_dict['VV{}'.format(suffix)]].array
w = self.hdu.columns[self.par_dict['WW{}'.format(suffix)]].array
if abs(np.mean(u)) < 1.:
u *= self.frequency
v *= self.frequency
w *= self.frequency
return np.vstack((u, v, w)).T
@property
def uv(self):
"""
Shortcut for (u, v) -coordinates of visibility values.
:return:
Numpy.ndarray with shape (N, 2,), where N is the number of (u, v, w)
points.
"""
return self.uvw[:, :2]
@property
def imsize_by_uv_coverage(self):
"""
Calculate image size & pixel size using UV-plane coverage information.
"""
raise NotImplementedError
def _get_baseline_indexes(self, baseline):
"""
Return boolean numpy array with indexes of given baseline in original
record array.
"""
assert baseline in self.baselines
try:
indxs = self._indxs_baselines[baseline]
except KeyError:
indxs = self.hdu.data['BASELINE'] == baseline
return indxs
def _get_baselines_indexes(self, baselines):
"""
Return boolean numpy array with indexes of given baselines in original
record array.
"""
result = self._get_baseline_indexes(baseline=baselines[0])
try:
for baseline in baselines[1:]:
result = np.logical_or(result, self._get_baseline_indexes(baseline))
# When ``baselines`` consists of only one item
except TypeError:
pass
return result
def _get_times_indexes(self, start_time, stop_time):
"""
Return numpy boolean array with indexes between given time in original
record array.
:param start_time:
Instance of ``astropy.time.Time`` class.
:param stop_time:
Instance of ``astropy.time.Time`` class.
"""
return np.logical_and(start_time <= self.times, stop_time >= self.times)
    def _convert_bands_to_indexes(self, bands):
"""
Convert iterable of band numbers to boolean array with ``True`` values
for given bands.
:param bands:
Iterable of integers (starting from zero) - band numbers.
:return:
Numpy boolean array with size equal to number of bands and ``True``
values corresponding to specified band numbers.
"""
        assert set(bands).issubset(range(self.nif)), \
            "Band numbers must be from 0 to {}".format(self.nif - 1)
        assert max(bands) < self.nif
return to_boolean_array(bands, self.nif)
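    # Example (illustrative, assuming ``to_boolean_array`` semantics per the
    # docstring): for a 4-IF dataset, bands=[0, 2] maps to the boolean
    # selector array([ True, False,  True, False]).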
def _convert_stokes_to_indexes(self, stokes):
"""
Convert iterable of correlations to boolean array with ``True`` values
for given correlations.
:param stokes:
Iterable of strings - correlations.
:return:
Numpy boolean array with size equal to number of correlations and
``True`` values corresponding to specified correlations.
"""
for single_stokes in stokes:
assert check_issubset(single_stokes, self.stokes), "Must be RR, LL, RL or LR!"
stokes_num = [self.stokes_dict_inv[stokes_] for stokes_ in stokes]
return to_boolean_array(stokes_num, self.nstokes)
def _get_uvdata_slice(self, baselines=None, start_time=None, stop_time=None,
bands=None, stokes=None):
"""
Return tuple of index arrays that represent portion of ``UVData.uvdata``
array with given values of baselines, times, bands, stokes.
:param stokes: (optional)
Iterable of correlations or Stokes parameters that are present in
self.
"""
if baselines is None:
baselines = self.baselines
indxs = self._get_baselines_indexes(baselines)
if start_time is not None or stop_time is not None:
indxs = np.logical_and(indxs, self._get_times_indexes(start_time,
stop_time))
if bands is None:
            bands_indxs = self._convert_bands_to_indexes(range(self.nif))
else:
            bands_indxs = self._convert_bands_to_indexes(bands)
if stokes is None:
stokes = self.stokes
stokes_indxs = self._convert_stokes_to_indexes(stokes)
return np.ix_(indxs, bands_indxs, stokes_indxs)
def _convert_uvdata_slice_to_bool(self, sl):
"""
Convert indexing tuple to boolean array of ``UVData.uvdata`` shape.
:param sl:
Tuple of indexing arrays. Output of ``self._get_uvdata_slice``.
:return:
Boolean numpy array with shape of ``UVData.uvdata``.
"""
boolean = np.zeros(self.uvdata.shape, dtype=bool)
boolean[sl] = True
return boolean
def _choose_uvdata(self, start_time=None, stop_time=None, baselines=None,
bands=None, stokes=None, freq_average=False):
"""
Method that returns chosen data from ``_data`` numpy structured array
based on user specified parameters.
:param start_time: (optional)
Instance of ``astropy.time.Time`` class. (default: ``None``)
:param stop_time: (optional)
Instance of ``astropy.time.Time`` class. (default: ``None``)
:param baselines: (optional)
One or iterable of baselines numbers or ``None``. If ``None`` then
use all baselines. (default: ``None``)
:param bands: (optional)
Iterable of IF numbers (0 to #IF-1) or ``None``. If ``None`` then
use all IFs. (default: ``None``)
:param stokes: (optional)
Any string of: ``I``, ``Q``, ``U``, ``V``, ``RR``, ``LL``, ``RL``,
``LR`` or ``None``. If ``None`` then use all available correlations.
(default: ``None``)
:return:
Numpy.ndarray that is part of (copy) ``UVData.uvdata`` array with
shape (#N, #IF, #STOKES).
"""
# Copy with shape (#N, #IF, #STOKES)
uvdata = self.uvdata_weight_masked
if start_time is None:
start_time = self.times[0]
if stop_time is None:
stop_time = self.times[-1]
if stokes is None:
stokes = self.stokes
sl = self._get_uvdata_slice(baselines, start_time, stop_time, bands,
stokes)
result = uvdata[sl]
elif check_issubset(stokes, self.stokes):
stokes = [stokes]
sl = self._get_uvdata_slice(baselines, start_time, stop_time, bands,
stokes)
result = uvdata[sl]
elif check_issubset(stokes, ('I', 'Q', 'U', 'V')):
if stokes in ('I', 'V'):
sl_rr = self._get_uvdata_slice(baselines, start_time, stop_time,
bands, stokes=['RR'])
sl_ll = self._get_uvdata_slice(baselines, start_time, stop_time,
bands, stokes=['LL'])
if stokes == 'I':
# I = 0.5 * (RR + LL)
result = 0.5 * (uvdata[sl_rr] + uvdata[sl_ll])
else:
# V = 0.5 * (RR - LL)
result = 0.5 * (uvdata[sl_rr] - uvdata[sl_ll])
if stokes in ('Q', 'U'):
sl_rl = self._get_uvdata_slice(baselines, start_time, stop_time,
bands, stokes=['RL'])
sl_lr = self._get_uvdata_slice(baselines, start_time, stop_time,
bands, stokes=['LR'])
if stokes == 'Q':
                # Q = 0.5 * (LR + RL)
result = 0.5 * (uvdata[sl_lr] + uvdata[sl_rl])
else:
                # U = 0.5 * 1j * (LR - RL)
result = 0.5 * 1j * (uvdata[sl_lr] - uvdata[sl_rl])
else:
raise Exception("Stokes must be iterable consisting of following "
"items only: I, Q, U, V, RR, LL, RL, LR!")
if freq_average:
result = np.ma.mean(result, axis=1).squeeze()
return result
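    # Illustrative usage (hypothetical baseline number): frequency-averaged
    # Stokes I visibilities for a single baseline:
    #     vis_i = self._choose_uvdata(baselines=[258], stokes='I',
    #                                 freq_average=True)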
def noise_v(self, average_bands=False):
"""
Calculate noise for each baseline using Stokes ``V`` data.
:param average_bands: (optional)
Boolean - average bands after noise calculation?
:return:
Dictionary with keys - baseline numbers & values - numpy arrays with
shape (#bands, #stokes) or (#stokes,) if ``average_bands=True``.
"""
if self._noise_v is None:
baseline_noises = dict()
for baseline in self.baselines:
uvdata = self._choose_uvdata(baselines=[baseline])
v = uvdata[..., 0] - uvdata[..., 1]
mask = np.logical_or(np.isnan(v), v.mask)
# #groups, #bands
data = np.ma.array(v, mask=mask)
mstd = list()
for band_data in data.T:
mstd.append(0.5 * (biweight_midvariance(band_data.real) +
biweight_midvariance(band_data.imag)))
baseline_noises[baseline] =\
np.array(mstd).repeat(self.nstokes).reshape((self.nif,
self.nstokes))
self._noise_v = baseline_noises.copy()
if average_bands:
return {baseline: np.nanmean(mstd, axis=0) for baseline, mstd in
self._noise_v.items()}
return self._noise_v
def noise_diffs(self, average_bands=False):
"""
Calculate noise for each baseline using successive differences approach
        (Briggs' dissertation).
:param average_bands: (optional)
Boolean - average bands after noise calculation?
:return:
Dictionary with keys - baseline numbers & values - numpy arrays with
shape (#bands, #stokes) or (#stokes,) if ``average_bands=True``.
"""
if self._noise_diffs is None:
baseline_noises = dict()
for baseline in self.baselines:
uvdata = self._choose_uvdata(baselines=[baseline])
diffs = uvdata[:-1, ...] - uvdata[1:, ...]
mask = np.logical_or(np.isnan(diffs), diffs.mask)
# #groups, #bands
data = np.ma.array(diffs, mask=mask)
mstd = np.zeros((self.nif, self.nstokes))
for if_ in range(self.nif):
for stoke in range(self.nstokes):
data_ = data[:, if_, stoke]
# mstd[if_, stoke] += biweight_midvariance(data_.real)
# mstd[if_, stoke] += biweight_midvariance(data_.imag)
mstd[if_, stoke] += np.std(data_.real)
mstd[if_, stoke] += np.std(data_.imag)
mstd[if_, stoke] *= 0.5
baseline_noises[baseline] = mstd
self._noise_diffs = baseline_noises.copy()
if average_bands:
return {baseline: np.nanmean(mstd, axis=0) for baseline, mstd in
self._noise_diffs.items()}
return self._noise_diffs
def noise(self, split_scans=False, use_V=True, average_freq=False):
"""
        Calculate noise for each baseline. If ``split_scans`` is True then
        calculate noise for each scan too. If ``use_V`` is True then use
        Stokes V data (``RR`` - ``LL``) for the computation, assuming no
        signal in V. Otherwise use the successive differences approach
        (Briggs' dissertation).
:param split_scans: (optional)
Should we calculate noise for each scan? (default: ``False``)
:param use_V: (optional)
            Use Stokes V data (``RR`` - ``LL``) to calculate noise assuming no
            signal in Stokes V? If ``False`` then use the successive
            differences approach (see Briggs' dissertation). (default:
            ``True``)
:param average_freq: (optional)
Use IF-averaged data for calculating noise? (default: ``False``)
:return:
Dictionary with keys - baseline numbers & values - arrays of shape
([#scans], [#IF], [#stokes]). It means (#scans, #IF) if
``split_scans=True`` & ``use_V=True``, (#IF, #stokes) if
``split_scans=False`` & ``use_V=False``, (#scans, #IF, #stokes) if
``split_scans=True``, ``use_V=False`` & ``average_freq=False`` etc.
"""
baselines_noises = dict()
if use_V:
# Calculate dictionary {baseline: noise} (if split_scans is False)
# or {baseline: [noises]} if split_scans is True.
if not split_scans:
for baseline in self.baselines:
baseline_uvdata = self._choose_uvdata(baselines=[baseline])
if average_freq:
baseline_uvdata = np.mean(baseline_uvdata, axis=1)
# V = 0.5*(RR-LL)
v = 0.5*(baseline_uvdata[..., 0] - baseline_uvdata[..., 1]).real
mask = ~np.isnan(v)
# sigma_V = 0.5*sqrt(sigma_RR^2 + sigma_LL^2)=0.5*sqrt(2)*sigma_RR,LL
# => sigmaRR,LL,RL,LR = sqrt(2)*sigma_V
baselines_noises[baseline] =\
np.sqrt(2.0)*np.asarray(mad_std(np.ma.array(v, mask=np.invert(mask)).data,
axis=0))
# np.asarray(np.std(np.ma.array(v, mask=np.invert(mask)).data,
# axis=0))
else:
# Use each scan
for baseline in self.baselines:
baseline_noise = list()
try:
for scan_bl_indxs in self.scans_bl[baseline]:
# (#obs in scan, #nif, #nstokes,)
scan_baseline_uvdata = self.uvdata[scan_bl_indxs]
if average_freq:
# (#obs in scan, #nstokes,)
scan_baseline_uvdata = np.mean(scan_baseline_uvdata,
axis=1)
v = 0.5*(scan_baseline_uvdata[..., 0] -
scan_baseline_uvdata[..., 1]).real
mask = ~np.isnan(v)
scan_noise = np.sqrt(2.0)*np.asarray(np.std(np.ma.array(v, mask=np.invert(mask)).data, axis=0))
baseline_noise.append(scan_noise)
baselines_noises[baseline] = np.asarray(baseline_noise)
except TypeError:
baselines_noises[baseline] = None
else:
if not split_scans:
for baseline in self.baselines:
# (#, #IF, #Stokes)
baseline_uvdata = self._choose_uvdata(baselines=[baseline])
if average_freq:
baseline_uvdata = np.mean(baseline_uvdata, axis=1)
# (#, #IF, #Stokes)
differences = (baseline_uvdata[:-1, ...] -
baseline_uvdata[1:, ...])/np.sqrt(2.0)
mask = np.isnan(differences)
# (#IF, #Stokes)
baselines_noises[baseline] = \
np.asarray([mad_std(np.ma.array(differences,
mask=mask).real[..., i], axis=0) for i
in range(self.nstokes)]).T
else:
# Use each scan
for baseline in self.baselines:
baseline_noise = list()
try:
for scan_bl_indxs in self.scans_bl[baseline]:
# (#obs in scan, #nif, #nstokes,)
scan_baseline_uvdata = self.uvdata[scan_bl_indxs]
if average_freq:
# shape = (#obs in scan, #nstokes,)
scan_baseline_uvdata = np.mean(scan_baseline_uvdata,
axis=1)
# (#obs in scan, #nif, #nstokes,)
differences = (scan_baseline_uvdata[:-1, ...] -
scan_baseline_uvdata[1:, ...])/np.sqrt(2.0)
mask = ~np.isnan(differences)
# (nif, nstokes,)
scan_noise = np.asarray([mad_std(np.ma.array(differences,
mask=np.invert(mask)).real[..., i],
axis=0) for i in
range(self.nstokes)]).T
baseline_noise.append(scan_noise)
baselines_noises[baseline] = np.asarray(baseline_noise)
except TypeError:
baselines_noises[baseline] = None
return baselines_noises
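    # Usage sketch (hypothetical ``uvd`` instance). Returned shapes depend on
    # the flags, e.g. an (#IF,)-shaped array per baseline for ``use_V=True``
    # without scan splitting:
    #
    #     sigmas = uvd.noise(use_V=True, split_scans=False, average_freq=False)
    #     sigma0 = sigmas[uvd.baselines[0]]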
def noise_add(self, noise=None, df=None, split_scans=False):
"""
Add noise to visibilities. Here std - standard deviation of
real/imaginary component.
:param noise:
Mapping from baseline number to:
1) std of noise. Will use one value of std for all stokes and IFs.
2) 1D array with shape (#IF). Will use different values of std for
different IFs. This is an option if stokes V was used to calculate
the noise.
3) 2D array with shape (#IF, #Stokes). This is an option if
differences between neighbor visibilities were used to calculate
the noise.
:param df: (optional)
Number of d.o.f. for standard Student t-distribution used as noise
model. If set to ``None`` then use gaussian noise model. (default:
``None``)
:param split_scans: (optional)
            Is the ``noise`` parameter a mapping from baseline numbers to
            iterables of noise stds, one per scan on the baseline? (default:
            ``False``)
"""
# TODO: if on df before generating noise values
for baseline, baseline_stds in noise.items():
# i - IF number, std (#IF, #Stokes)
for i, std in enumerate(baseline_stds):
# (#, 1, #stokes)
for stokes in self.stokes:
j = self.stokes_dict_inv[stokes]
baseline_uvdata =\
self._choose_uvdata(baselines=[baseline], bands=[i],
stokes=stokes)
# (#, #IF, #CH, #stokes)
n = len(baseline_uvdata)
sl = self._get_uvdata_slice(baselines=[baseline], bands=[i],
stokes=(stokes,))
try:
std_stokes = std[j]
except IndexError:
std_stokes = std
noise_to_add = vec_complex(np.random.normal(scale=std_stokes,
size=n),
np.random.normal(scale=std_stokes,
size=n))
noise_to_add = np.reshape(noise_to_add,
baseline_uvdata.shape)
baseline_uvdata += noise_to_add
self.uvdata[sl] = baseline_uvdata
self.sync()
# TODO: Optionally calculate noise by scans.
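    # Usage sketch (hypothetical): estimate per-baseline stds from the data
    # and feed them back in as synthetic Gaussian noise. The exact shapes
    # accepted are documented in the ``noise`` parameter description above.
    #
    #     stds = uvd.noise(use_V=False)     # {baseline: (#IF, #Stokes) array}
    #     uvd.noise_add(noise=stds)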
def error(self, average_freq=False, use_V=True):
"""
Shortcut for error associated with each visibility.
        It uses noise calculations based on zero Stokes V or successive
        differences implemented in the ``noise()`` method to infer the sigma
        of Gaussian noise. More functionality is planned to be added later
        (see Issue #8).
:param average_freq: (optional)
Use IF-averaged data for calculating errors? (default: ``False``)
:param use_V: (optional)
Boolean. Calculate noise using Stokes `V` or successive differences?
(default: ``True``)
:return:
Numpy.ndarray with shape (#N, [#IF,] #stokes,) where #N - number of
groups.
"""
noise_dict = self.noise(use_V=use_V, split_scans=False,
average_freq=average_freq)
if not average_freq:
error = np.empty((len(self.uvdata), self.nif,
self.nstokes,), dtype=float)
else:
error = np.empty((len(self.uvdata), self.nstokes,),
dtype=float)
for i, baseline in enumerate(self.hdu.data['BASELINE']):
# FIXME: Until ``UVData.noise`` always returns (#, [#IF],
# #Stokes) even for ``use_V=True`` - i must repeat array for
# each Stokes if ``use_V=True`` is used!
error[i] = noise_dict[baseline]
return error
def scale_amplitude(self, scale):
"""
Scale amplitude of uv-data by some scale factor. Changes ``uvdata`` and
synchronizes internal representation with it.
:param scale:
Float. Factor of scaling.
"""
self.uvdata *= scale
self.sync()
def scale_hands(self, scale_r=1.0, scale_l=1.0):
"""
Scale correlations of uv-data by some scale factor. Changes ``uvdata``
and synchronizes internal representation with it.
:param scale_r: (optional)
Float. Factor of scaling for gains R. (default: ``1.0``)
:param scale_l: (optional)
Float. Factor of scaling for gains L. (default: ``1.0``)
"""
for stokes, index in self.stokes_dict_inv.items():
if stokes == 'RR':
self.uvdata[..., index] *= scale_r**2
elif stokes == 'LL':
self.uvdata[..., index] *= scale_l**2
elif stokes == 'RL':
self.uvdata[..., index] *= scale_r*scale_l
elif stokes == 'LR':
self.uvdata[..., index] *= scale_l*scale_r
else:
raise Exception("Implemented only for RR, LL, RL & LR!")
self.sync()
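    # Usage sketch: emulate a 10% R-gain amplitude error. Since RR scales as
    # g_R * g_R^*, its amplitude is multiplied by scale_r**2, while the cross
    # hands RL/LR scale by scale_r * scale_l.
    #
    #     uvd.scale_hands(scale_r=1.1, scale_l=1.0)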
def scale_cross_hands(self, scale):
assert "RL" in self.stokes
assert "LR" in self.stokes
self.uvdata[..., 2] *= scale
self.uvdata[..., 3] *= scale
self.sync()
# FIXME: Fix logic of arguments. How one can plot one antennas with others,
# several baselines, all baselines etc.
def uv_coverage(self, antennas=None, baselines=None, sym='.k', fig=None):
"""
Make plots of uv-coverage for selected baselines/antennas.
:param antennas: (optional)
:param baselines: (optional)
:param sym: (optional)
Matplotlib symbols to plot. (default: ``.k``)
"""
if antennas is None and baselines is None:
antennas = self.antennas
if baselines is None:
baselines = set(self.baselines)
else:
baselines_list = list()
# If ``baselines`` is iterable
try:
baselines_list.extend(baselines)
# If ``baselines`` is not iterable (int)
except TypeError:
baselines_list.append(baselines)
baselines = set(baselines_list)
# Check that given baseline numbers are among existing ones
assert(baselines.issubset(self.baselines))
# Find what baselines to display
baselines_to_display = list()
antennas_list = list()
# If ``antennas`` is iterable
try:
antennas_list.extend(antennas)
# If ``antennas`` is not iterable (int)
except TypeError:
antennas_list.append(antennas)
# If more than one antennas are selected
if len(antennas_list) > 1:
from itertools import combinations
for ant1, ant2 in combinations(antennas_list, 2):
bl = ant2 + 256 * ant1
baselines_to_display.append(float(bl))
else:
for bl in self.baselines:
ant1, ant2 = baselines_2_ants([bl])
if antennas_list[0] in (ant1, ant2):
baselines_to_display.append(bl)
indxs = self._get_baselines_indexes(baselines=baselines_to_display)
observed = self._choose_uvdata(baselines=baselines_to_display,
stokes='I', freq_average=True)
# model = model_uvdata._choose_uvdata(baselines=baselines_to_display,
# stokes='I', freq_average=True)
# diff = np.angle(observed) - np.angle(model)
uv = self.uv[indxs]
if fig is None:
fig, axes = matplotlib.pyplot.subplots(1, 1)
else:
axes = fig.get_axes()[0]
# if model_uvdata is None:
axes.plot(uv[:, 0], uv[:, 1], sym, ms=0.5)
# FIXME: This is right only for RR/LL!
axes.plot(-uv[:, 0], -uv[:, 1], sym, ms=0.5)
# else:
# axes.scatter(uv[:, 0], uv[:, 1], c=diff, alpha=1, s=4,
# cmap='jet', vmin=-0.75, vmax=0.75)
# # FIXME: This is right only for RR/LL!
# im = axes.scatter(-uv[:, 0], -uv[:, 1], c=-diff, alpha=1, s=4,
# cmap='jet', vmin=-0.75, vmax=0.75)
# from mpl_toolkits.axes_grid1 import make_axes_locatable
# divider = make_axes_locatable(axes)
# cax = divider.append_axes("right", size="10%", pad=0.00)
# cb = fig.colorbar(im, cax=cax)
# cb.set_label(r"$\phi_{\rm obs} - \phi_{\rm model}$, rad")
# Find max(u & v)
umax = max(abs(uv[:, 0]))
vmax = max(abs(uv[:, 1]))
uvmax = max(umax, vmax)
uv_range = [-1.1 * uvmax, 1.1 * uvmax]
axes.set_xlim(uv_range)
axes.set_ylim(uv_range)
axes.set_aspect('equal')
axes.set_xlabel('U, wavelengths')
axes.set_ylabel('V, wavelengths')
fig.show()
return fig
def __copy__(self):
return self
def __deepcopy__(self, memo):
return UVData(self.hdulist.filename(), mode='readonly')
def __add__(self, other):
"""
Add to self another instance of UVData.
:param other:
Instance of ``UVData`` class. Or object that has ``uvdata``
attribute that is numpy structured array with the same ``dtype`` as
``self``.
:return:
            Instance of ``UVData`` class with uv-data in the ``uvdata``
            attribute that is the sum of ``self`` and ``other``.
"""
assert(self.uvdata.shape == other.uvdata.shape)
assert(len(self.uvdata) == len(other.uvdata))
self_copy = copy.deepcopy(self)
self_copy.uvdata = self.uvdata + other.uvdata
self_copy.sync()
return self_copy
def __sub__(self, other):
"""
        Subtract from self another instance of UVData.
:param other:
Instance of ``UVData`` class. Or object that has ``uvdata``
attribute that is numpy structured array with the same ``dtype`` as
``self``.
:return:
            Instance of ``UVData`` class with uv-data in the ``uvdata``
            attribute that is the difference of ``self`` and ``other``.
"""
assert(self.uvdata.shape == other.uvdata.shape)
assert(len(self.uvdata) == len(other.uvdata))
self_copy = copy.deepcopy(self)
self_copy.uvdata = self.uvdata - other.uvdata
self_copy.sync()
return self_copy
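    # Usage sketch (hypothetical ``observed``/``model_data`` instances with
    # identical uv-data shapes): residual visibilities via the overloaded
    # operators above.
    #
    #     residual = observed - model_data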
def multiply(self, x, inplace=False):
"""
        Multiply visibilities by a scalar.
        :param x:
            Scalar factor.
        :return:
            ``self`` if ``inplace=True``, else a scaled deep copy of ``self``.
"""
if inplace:
self.uvdata = x * self.uvdata
self.sync()
return self
else:
self_copy = copy.deepcopy(self)
self_copy.uvdata = x * self.uvdata
self_copy.sync()
return self_copy
# TODO: TEST ME!!!
    # TODO: Do I need the possibility of multiplying by any complex number?
    # FIXME: After absorbing gains and multiplying on UVData instance some
    # entries do contain NaN. Is that because some data are flagged and no
    # gains solutions are available for them?
def __mul__(self, gains):
"""
Applies complex antenna gains to the visibilities of ``self``.
:param gains:
Instance of ``Gains`` class. Or object with ``data`` attribute
that is structured numpy array and has ``dtype``:
dtype=[('start', '<f8'),
('stop', '<f8'),
('antenna', 'int'),
('gains', 'complex', (nif, npol,)),
('weights', '<f8', (nif, npol,))]
:return:
            Instance of ``UVData`` class with visibilities multiplied by
            complex antenna gains.
"""
self_copy = copy.deepcopy(self)
        # NOTE: compare against ``gains.nif`` directly; the previous
        # ``np.shape(gains.nif)`` returned a tuple and could never be equal.
        assert(self.nif == gains.nif)
# TODO: Now we need this to calculating gain * gains*. But try to
# exclude this assertion
assert(self.nstokes == 4)
for t in set(self.hdu.columns[self.par_dict['DATE']].array):
# Find all uv-data entries with time t:
indxs = np.where(self.hdu.columns[self.par_dict['DATE']].array
== t)[0]
# Loop through uv_indxs (different baselines with the same ``t``)
# and multiply visibility with baseline ant1-ant2 to
# gain(ant1)*gain(ant2)^*.
for indx in indxs:
bl = self.hdu.columns[self.par_dict['BASELINE']].array[indx]
try:
gains12 = gains.find_gains_for_baseline(t, bl)
# If gains is the instance of ``Absorber`` class
except AttributeError:
gains12 = gains.absorbed_gains.find_gains_for_baseline(t,
bl)
# FIXME: In substitute() ['hands'] then [indxs] does return
# view.
# print "gains12 :"
# print gains12
# Doesn't it change copying? Order of indexing [][] has changed
self_copy.uvdata[indx] *= gains12.T
self_copy.sync()
return self_copy
def zero_data(self):
"""
Method that zeros all visibilities.
"""
self.uvdata = np.zeros(np.shape(self.uvdata), dtype=self.uvdata.dtype)
def zero_hands(self, hands):
"""
Method that zeros hands (RR, LL, RL or LR) visibilities.
:param hands:
String of correlation. Must be among existing ones in current data.
"""
self._check_stokes_present(hands)
hand_idx = self.stokes_dict_inv[hands]
self.uvdata[:, :, hand_idx] = np.zeros(np.shape(self.uvdata[:, :, 0]),
dtype=self.uvdata.dtype)
self.sync()
def cv(self, q, fname):
"""
Method that prepares training and testing samples for q-fold
cross-validation.
Inputs:
:param q:
Number of folds for cross-validation.
:param fname:
Base of file names for output the results.
:return:
            ``q`` pairs of files (in the format of the ``IO`` subclass that
            loaded the current instance of ``UVData``) with training and
            testing samples prepared in such a way that a 1/``q`` part of the
            visibilities from each baseline falls into the testing sample and
            the rest falls into the training sample.
"""
# List of lists of ``q`` blocks of each baseline
baselines_chunks = list()
# Split data of each baseline to ``q`` blocks
for baseline in self.baselines:
baseline_indxs = np.where(self.hdu.columns[self.par_dict['BASELINE']].array ==
baseline)[0]
# Shuffle indexes
np.random.shuffle(baseline_indxs)
# Indexes of ``q`` nearly equal chunks. That is list of ``q`` index
# arrays
q_indxs = np.array_split(baseline_indxs, q)
# ``q`` blocks for current baseline
baseline_chunks = [list(indx) for indx in q_indxs]
baselines_chunks.append(baseline_chunks)
# Combine ``q`` chunks to ``q`` pairs of training & testing datasets
for i in range(q):
print(i)
# List of i-th chunk for testing dataset for each baseline
testing_indxs = [baseline_chunks[i] for baseline_chunks in
baselines_chunks]
# List of "all - i-th" chunk as training dataset for each baseline
training_indxs = [baseline_chunks[:i] + baseline_chunks[i + 1:] for
baseline_chunks in baselines_chunks]
            # Combine testing & training samples of each baseline into one
testing_indxs = np.sort([item for sublist in testing_indxs for item
in sublist])
training_indxs = [item for sublist in training_indxs for item in
sublist]
training_indxs = [item for sublist in training_indxs for item in
sublist]
# Save each pair of datasets to files
# NAXIS changed!!!
training_data=self.hdu.data[training_indxs]
testing_data =self.hdu.data[testing_indxs]
self.save(data=training_data,
fname=fname + '_train' + '_' + str(i + 1).zfill(2) + 'of'
+ str(q) + '.FITS')
self.save(data=testing_data,
fname=fname + '_test' + '_' + str(i + 1).zfill(2) + 'of' +
str(q) + '.FITS')
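    # Usage sketch (hypothetical base name): 5-fold CV writes file pairs like
    # ``exp_train_01of5.FITS`` / ``exp_test_01of5.FITS``:
    #
    #     uvd.cv(5, "exp")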
    # TODO: Refactor to a general score (RMS) estimate of a ``Model`` instance
    # on ``self``.
def cv_score(self, model, average_freq=True, baselines=None):
"""
Method that returns cross-validation score for ``self`` (as testing
cv-sample) and model (trained on training cv-sample).
:param model:
Model to cross-validate. Instance of ``Model`` class.
:param average_freq: (optional)
Boolean - average IFs before CV score calculation? (default:
``True``)
:return:
Cross-validation score between uv-data of current instance and
model for stokes ``I``.
"""
baselines_cv_scores = list()
# noise = self.noise_diffs(average_bands=average_freq)
data_copied = copy.deepcopy(self)
data_copied.substitute([model])
data_copied = self - data_copied
if average_freq:
uvdata = data_copied.uvdata_freq_averaged
else:
uvdata = data_copied.uvdata_weight_masked
if baselines is None:
baselines = self.baselines
for baseline in baselines:
# square difference for each baseline, divide by baseline noise
# and then sum for current baseline
indxs = data_copied._indxs_baselines[baseline]
hands_diff = uvdata[indxs]
# if average_freq:
# hands_diff = uvdata[indxs] / noise[baseline]
# else:
# hands_diff = uvdata[indxs] / noise[baseline][None, :, None]
# Construct difference for Stokes ``I`` parameter
diff = 0.5 * (hands_diff[..., 0] + hands_diff[..., 1])
# print np.shape(hands_diff)
# diff = hands_diff[..., 0]
diff = diff.flatten()
diff *= np.conjugate(diff)
try:
baselines_cv_scores.append(float(diff.sum())/np.count_nonzero(~diff.mask[..., :2]))
except ZeroDivisionError:
continue
return sum(baselines_cv_scores)
# TODO: Add method for inserting D-terms into data. Using different
# amp/phase for each antenna/IF. Then wrap this method to add residual
# D-terms errors to data in ``bootstrap``. Possibly use class for D-terms.
def rotate_evpa(self, angle):
"""
        Rotate EVPA of linear polarization by ``angle`` [rad].
"""
self._check_stokes_present("RL")
self._check_stokes_present("LR")
q = self._choose_uvdata(stokes="Q")
u = self._choose_uvdata(stokes="U")
q_ = q*np.cos(2.*angle)+u*np.sin(2.*angle)
u_ = -q*np.sin(2.*angle)+u*np.cos(2.*angle)
# FIXME: Use self._get_uvdata_slice to get indexes of RL and LR?
self.uvdata[:, :, self.stokes_dict_inv["RL"]] = (q_+1j*u_)[..., 0]
self.uvdata[:, :, self.stokes_dict_inv["LR"]] = (q_-1j*u_)[..., 0]
self.sync()
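    # Usage sketch: rotate the EVPA by 10 degrees, which rotates the complex
    # linear polarization Q + iU in the cross-hand visibilities accordingly:
    #
    #     uvd.rotate_evpa(np.deg2rad(10.0))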
# TODO: Use for-cycle on baseline indexes
def substitute(self, models, baselines=None):
"""
Method that substitutes visibilities of ``self`` with model values.
:param models:
            Iterable of ``Model`` instances that substitute visibilities of
            ``self`` with their own. There should be only one (or zero) model
            for each Stokes parameter. If there are two, say two I-stokes
            models, then sum them first using ``Model.__add__``.
:param baselines (optional):
Iterable of baselines on which to substitute visibilities. If
``None`` then substitute on all baselines.
(default: ``None``)
"""
if baselines is None:
baselines = self.baselines
# Indexes of hdu.data with chosen baselines
# FIXME: Possibly use ``_get_uvdata_slice`` here and everywhere
indxs = np.hstack(index_of(baselines, self.hdu.columns[self.par_dict['BASELINE']].array))
n = len(indxs)
uv = self.uvw[indxs, :2]
uv_correlations = get_uv_correlations(uv, models)
for i, hand in self.stokes_dict.items():
try:
self.uvdata[indxs, :, i] = \
uv_correlations[hand].repeat(self.nif).reshape((n, self.nif))
self.sync()
# If model doesn't have some hands => pass it
except KeyError:
pass
# TODO: convert time to datetime format and use date2num for plotting
# TODO: make a kwarg argument - to plot in different symbols/colors
def tplot(self, baselines=None, bands=None, stokes=None, style='a&p',
freq_average=False, sym=None, start_time=None, stop_time=None):
"""
Method that plots uv-data vs. time.
:param baselines: (optional)
Iterable of baselines numbers or ``None``. If ``None`` then
use all baselines. (default: ``None``)
        :param bands: (optional)
Iterable of IF numbers (0-#IF-1) or ``None``. If ``None`` then
use all IFs. (default: ``None``)
:param stokes: (optional)
Any string of: ``I``, ``Q``, ``U``, ``V``, ``RR``, ``LL``, ``RL``,
``LR`` or ``None``. If ``None`` then use ``I``.
(default: ``None``)
:param style: (optional)
How to plot complex visibilities - real and imaginary part
(``re&im``) or amplitude and phase (``a&p``). (default: ``a&p``)
:param start_time: (optional)
Instance of ``astropy.time.Time`` class. (default: ``None``)
:param stop_time: (optional)
Instance of ``astropy.time.Time`` class. (default: ``None``)
.. note:: All checks are in ``_choose_uvdata`` method.
"""
if not pylab:
raise Exception('Install ``pylab`` for plotting!')
if not stokes:
stokes = 'I'
uvdata = self._choose_uvdata(baselines=baselines, bands=bands,
stokes=stokes, freq_average=freq_average,
start_time=start_time, stop_time=stop_time)
times_indxs = self._get_times_indexes(start_time, stop_time)
times = self.times[times_indxs]
if style == 'a&p':
a1 = np.angle(uvdata)
            a2 = np.real(np.sqrt(uvdata * np.conj(uvdata)))
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
import numpy as np
from scipy.spatial.distance import pdist
from pm4py.algo.discovery.dfg import algorithm as dfg_algorithm
from pm4py.statistics.attributes.log import get as attributes_filter
import pandas as pd
from pm4py.algo.clustering.trace_attribute_driven.variants import act_dist_calc
def dfg_dist_calc_act(log1, log2):
act1 = attributes_filter.get_attribute_values(log1, "concept:name")
act2 = attributes_filter.get_attribute_values(log2, "concept:name")
df1_act = act_dist_calc.occu_var_act(act1)
df2_act = act_dist_calc.occu_var_act(act2)
df_act = pd.merge(df1_act, df2_act, how='outer', on='var').fillna(0)
dist_act = pdist(np.array([df_act['freq_x'].values, df_act['freq_y'].values]), 'cosine')[0]
return dist_act
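# Usage sketch (hypothetical event logs ``log_a``/``log_b`` read with pm4py):
# 0.0 means identical activity-frequency profiles, values near 1.0 mean nearly
# orthogonal profiles.
#
#     d_act = dfg_dist_calc_act(log_a, log_b)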
def dfg_dist_calc_suc(log1, log2):
dfg1 = dfg_algorithm.apply(log1)
dfg2 = dfg_algorithm.apply(log2)
df1_dfg = act_dist_calc.occu_var_act(dfg1)
df2_dfg = act_dist_calc.occu_var_act(dfg2)
df_dfg = pd.merge(df1_dfg, df2_dfg, how='outer', on='var').fillna(0)
dist_dfg = pdist(np.array([df_dfg['freq_x'].values, df_dfg['freq_y'].values]), 'cosine')[0]
return dist_dfg
def dfg_dist_calc(log1, log2):
act1 = attributes_filter.get_attribute_values(log1, "concept:name")
act2 = attributes_filter.get_attribute_values(log2, "concept:name")
dfg1 = dfg_algorithm.apply(log1)
dfg2 = dfg_algorithm.apply(log2)
df1_act = act_dist_calc.occu_var_act(act1)
df2_act = act_dist_calc.occu_var_act(act2)
df1_dfg = act_dist_calc.occu_var_act(dfg1)
df2_dfg = act_dist_calc.occu_var_act(dfg2)
df_act = pd.merge(df1_act, df2_act, how='outer', on='var').fillna(0)
df_dfg = pd.merge(df1_dfg, df2_dfg, how='outer', on='var').fillna(0)
    dist_act = pdist(np.array([df_act['freq_x'].values, df_act['freq_y'].values]), 'cosine')[0]
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import inspect
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED, SUBCLASS_SAFE_FUNCTIONS, UNSUPPORTED_FUNCTIONS,
FUNCTION_HELPERS, DISPATCHED_FUNCTIONS, IGNORED_FUNCTIONS)
from astropy.utils.compat import (
NUMPY_LT_1_14, NUMPY_LT_1_15, NUMPY_LT_1_16, NUMPY_LT_1_18)
NO_ARRAY_FUNCTION = not ARRAY_FUNCTION_ENABLED
# To get the functions that could be covered, we look for those that
# are wrapped. Of course, this does not give a full list pre-1.17.
all_wrapped_functions = {name: f for name, f in np.__dict__.items()
if callable(f) and hasattr(f, '__wrapped__') and
(NUMPY_LT_1_15 or f is not np.printoptions)}
all_wrapped = set(all_wrapped_functions.values())
class CoverageMeta(type):
"""Meta class that tracks which functions are covered by tests.
Assumes that a test is called 'test_<function_name>'.
"""
covered = set()
def __new__(mcls, name, bases, members):
for k, v in members.items():
if inspect.isfunction(v) and k.startswith('test'):
f = k.replace('test_', '')
if f in all_wrapped_functions:
mcls.covered.add(all_wrapped_functions[f])
return super().__new__(mcls, name, bases, members)
class BasicTestSetup(metaclass=CoverageMeta):
"""Test setup for functions that should not change the unit.
Also provides a default Quantity with shape (3, 3) and units of m.
"""
def setup(self):
self.q = np.arange(9.).reshape(3, 3) / 4. * u.m
class InvariantUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
o = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs) * self.q.unit
assert o.shape == expected.shape
assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
        expected = func(self.q.value, *args, **kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
    # alen is deprecated in Numpy 1.18
if NUMPY_LT_1_18:
def test_alen(self):
assert np.alen(self.q) == 3
def test_shape(self):
assert np.shape(self.q) == (3, 3)
def test_size(self):
assert np.size(self.q) == 9
def test_ndim(self):
assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (9, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
q = 1. * u.m
o, so = np.atleast_1d(q, self.q)
assert o.shape == (1,)
assert o == q
expected = np.atleast_1d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_2d(self):
q = 1. * u.m
o, so = np.atleast_2d(q, self.q)
assert o.shape == (1, 1)
assert o == q
        expected = np.atleast_2d(self.q.value) * u.m
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 12 14:24:38 2019
@author: thomas
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from numpy.polynomial.polynomial import polyval
import libconstants as const
import time
import random
# exponential response function - used for testing
def expres(a,t):
x = np.zeros(t.size)
i = np.where(t >= 0)
x[i] = a*np.exp(-a*t[i])
return(x)
def calcfreqaxis(t):
# calculate frequency axis
Dt = t[1]-t[0]
Nt = t.size
Dfs = 1.0/(Nt*Dt)
freqaxis = np.arange( -Nt/2.0, Nt/2.0, 1.0) * Dfs
return(freqaxis)
def rms(x):
"""
Calculate RMS value of signal
"""
S=np.sum(np.abs(x)**2.0) / x.size
return np.sqrt(S)
# analog Fourier transform via FFT
def spec(t,x):
Dt = t[1]-t[0]
Nt = t.size
Df = 1.0/(Nt*Dt)
f = np.arange( -Nt/2.0, Nt/2.0, 1.0) * Df
X = Dt * np.fft.fftshift( np.fft.fft (np.fft.fftshift(x) ))
return f,X
# inverse analog Fourier transfrom via IFFT
def invspec(f,X):
Df = f[1]-f[0]
Nf = f.size
Dt = 1.0/(Nf*Df)
t = np.arange( -Nf/2.0, Nf/2.0, 1.0) * Dt
x = Nf * Df * np.fft.fftshift( np.fft.ifft (np.fft.fftshift(X) ))
return t,x
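# Usage sketch: spec/invspec form an (approximate) analog Fourier transform
# pair, so a roundtrip recovers the input waveform up to FFT rounding:
#
#     t = np.arange(-512, 512) * 1e-3           # 1 ms sample spacing
#     x = np.exp(-t**2.0 / (2.0 * 0.05**2.0))   # Gaussian pulse
#     f, X = spec(t, x)
#     t2, x2 = invspec(f, X)                    # x2 ~ x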
# convert digital signal to analog
def converttoanalog(t,din,Ts,t0=0.0,gfilterbandwidth=None):
t=t-t0
m=np.round( t/Ts ).astype(int)
N=din.size
x=np.zeros(t.size)
i=np.where( (m>=0) & (m < N) )
x[i]=din[m[i]]
if gfilterbandwidth!=None:
f,P=spec(t,x)
H=np.exp(-f**2.0/2/gfilterbandwidth**2)
Q=P*H
_,x=invspec(f,Q)
return(x)
# sample analog waveform
def sample(t,x,Ts,toffset=0.0,tinitial=None,tduration=None):
if tinitial == None:
tinitial = np.min(t)
if tduration == None:
tduration = np.max(t) - np.min(t)
# find time instances within the specified interval
ts = t[ (t>=tinitial) & (t<tinitial + tduration) ]
# subtract to set the first time instance at t=0
ts = ts - tinitial
# obtain the corresponding values of the analog waveform
xs= x[ (t>=tinitial) & (t<tinitial + tduration) ]
# find in which sample duration the values of the time axis correspond
m = np.floor( ts/Ts ).astype(int)
# sampling times
tout = m*Ts
tout = np.unique(tout) + toffset
# sample by interpolation
dout = np.interp(tout,ts,xs)
# remember to reset the time axis
    # check whether we exceed the maximum duration
dout = dout[(tout >= tinitial) & (tout < tinitial + tduration)]
tout = tout[(tout >= tinitial) & (tout < tinitial + tduration)]
return(tout,dout)
# provide complex conjugate symmetry so that the IFFT is real
def addconjugates(din):
N=din.size
# ensure DC component is real
din[0]=np.real(din[0])
# calculate conjugate block
conjblock=np.flip(np.conj(din[1:]))
# new block to contain the conjugates
dout=np.zeros(2*N) + 1j * np.zeros(2*N)
# original part
dout[0:N]=din
# conjugate part
dout[N+1:]=conjblock
# Nth component must be real
dout[N]=din[0]
return(dout)
# Generate bit sequences for gray code of order M
def graycode(M):
if (M==1):
g=['0','1']
elif (M>1):
gs=graycode(M-1)
gsr=gs[::-1]
gs0=['0'+x for x in gs]
gs1=['1'+x for x in gsr]
g=gs0+gs1
return(g)
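# Usage sketch: successive Gray codewords differ in exactly one bit, e.g.
#
#     graycode(2)   # -> ['00', '01', '11', '10']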
# convert stream of bits to bit blocks of size Mi. If Mi is a numpy array the process is repeated cyclically.
def bitblockscyc(b,Mi):
blocks=[]
fullrepetitions=0
curr=0
bitsleft=b
while len(bitsleft) >= Mi[curr]:
currbits=bitsleft[0:Mi[curr]]
bitsleft=bitsleft[Mi[curr]:]
blocks.append(currbits)
curr=curr+1
if curr>=Mi.size:
curr=0
fullrepetitions=fullrepetitions+1
return blocks,bitsleft,fullrepetitions
# convert stream of bits to bit blocks of size Mi. If Mi is a numpy array the process is repeated cyclically. Blocks are arranged in two dimensions
def bitblockscyc2D(b,Mi):
blocks=[]
# initialize empty blocks for each value of Mi
for mi in Mi:
blocks.append([])
fullrepetitions=0
curr=0
bitsleft=b
while len(bitsleft) >= Mi[curr]:
currbits=bitsleft[0:Mi[curr]]
bitsleft=bitsleft[Mi[curr]:]
blocks[curr].append(currbits)
curr=curr+1
if curr>=Mi.size:
curr=0
fullrepetitions=fullrepetitions+1
return blocks,bitsleft,fullrepetitions
def counterrors(b1,b2):
"""
Count errors between bit sequences b1 and b2
"""
b1=bitstrtobits(b1)
b2=bitstrtobits(b2)
diff = np.abs(b1-b2)
errors=np.sum(diff).astype(int)
return(errors)
def bitstrblockstobitstr(blocks):
return ''.join(blocks)
# convert stream of bits to bit blocks of size Mi. If Mi is a numpy array the process is NOT repeated cyclically!!!
def bitblocks(b,Mi):
blocks=[]
curr=0
bitsleft=b
toread=Mi[curr]
while len(bitsleft) >= toread:
currbits=bitsleft[0:Mi[curr]]
bitsleft=bitsleft[Mi[curr]:]
blocks.append(currbits)
curr=curr+1
if (curr<Mi.size):
toread=Mi[curr]
else:
break
return blocks,bitsleft,curr
# convert a set of np.array bits to bit string
def bitstobitstr(b):
bitstr=''
for bi in b:
bitstr=bitstr+str(bi)
return(bitstr)
# convert a bit string to an np.array
def bitstrtobits(b):
bits=np.zeros(len(b))
for i,v in enumerate(b):
bits[i]=int(v)
return(bits)
# plot bits
def visualizebitblock(bitsb,zoomfrom=None,zoomto=None):
fig=plt.figure()
start=1
marker='ro'
color='r'
if isinstance(bitsb,str):
bitsb=[bitsb]
for b in bitsb:
bits=bitstrtobits(b)
end=start+bits.size
x=np.arange(start,end)
plt.stem(x,bits,linefmt=color,markerfmt=marker,use_line_collection=True,basefmt=" ")
if marker=='ro':
marker='bo'
color='b'
else:
marker='ro'
color='r'
start=end
if zoomfrom!=None:
start=zoomfrom
else:
start=1
if zoomto!=None:
end=zoomto
plt.xlim([start,end])
# PAM symbol dictionary
def pamsymbols(M):
m=np.arange(0,M)
symbols=2*m-M+1
return(symbols)
# PAM symbol at index m
def pamsymbol(m,M):
return(2*m-M+1)
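# Usage sketch: the M-PAM alphabet is the set of odd integers centred on
# zero, e.g.
#
#     pamsymbols(4)   # -> array([-3, -1, 1, 3])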
def qammapeven(order=16):
"""
QAM Constellation for order = 2^(2n)
"""
m = np.log2(order).astype(int)
Ms = np.sqrt(order)
    gc = graycode( m//2 )
forward = {} # bits to symbols
backward = np.zeros(order) + 1j * np.zeros(order)
for i,gi in enumerate(gc):
for j,gj in enumerate(gc):
q = np.complex(pamsymbol(i,Ms),pamsymbol(j,Ms))
forward[gi+gj] = q
indx = int( gi+gj , 2 )
backward[indx] = q
return forward, backward
def qammapodd(order=32):
"""
Map bit to QAM symbols for M=2^(2n+1) orderings
"""
forward = {} # bits to symbols
backward = np.zeros(order) + 1j * np.zeros(order)
m = np.log2(order).astype(int)
if m % 2 == 1:
l = (m-1)/2+1
s = (m-1)/2
l = l.astype(int)
Gl = graycode( l )
Gs = graycode( s )
n = ((m-1) / 2).astype(int)
        # Start from a 2**n x 2**(n+1) rectangular configuration
Q = np.zeros([2**n,2**(n+1)]) + 1j * np.zeros([2**n,2**(n+1)])
bits = []
for my in range(0,2**n):
B = []
for mx in range(0,2**(n+1)):
Q[my,mx] = (2**(n+1) - 2*mx - 1) +1j * (2**n - 2*my - 1)
B.append( Gl[mx] + Gs[my])
bits.append(B)
# Transform constellation
s = 2 ** ( s-1 )
for my in range(0,2**n):
for mx in range(0,2**(n+1)):
q=Q[my,mx]
b=bits[my][mx]
irct = np.real( q )
qrct = np.imag( q )
if np.abs( irct ) < 3 * s:
i = irct
q = qrct
elif np.abs(np.imag(q)) > s:
i = np.sign( irct ) * (np.abs(irct) - 2*s)
q = np.sign( qrct ) * (4*s - np.abs(qrct))
else:
i = np.sign( irct ) * (4*s - np.abs(irct))
q = np.sign( qrct ) * (np.abs(qrct) + 2*s)
forward[b] = i + 1j *q
indx = int( b , 2 )
backward[indx] = forward[b]
return forward, backward
def qammap(order=16):
"""
Map bits to QAM symbols
"""
m = np.log2(order).astype(int)
# is this a rectangular shaped QAM ?
if m % 2 == 0:
forward,backward = qammapeven(order=order)
else:
forward,backward = qammapodd(order=order)
avgpower = np.mean( np.abs (backward) ** 2.0 )
forwardn = {}
backwardn = np.zeros(order) + 1j * np.zeros(order)
s = np.sqrt(avgpower)
for x in forward:
forwardn[x] = forward[x] / s
backwardn = backward / s
return forward,backward,forwardn,backwardn,s
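# Usage sketch: build a 16-QAM map. ``forwardn``/``backwardn`` hold the
# unit-average-power constellation and ``s`` is the normalization scale:
#
#     fw, bw, fwn, bwn, s = qammap(16)
#     sym = fwn['0101']   # normalized symbol for a Gray-coded 4-bit block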
def findclosestanddecode(s,backwardmap):
"""
Find closest symbol and decode
"""
N = np.log2(backwardmap.size).astype(int)
p = np.abs(backwardmap - s).argmin()
sc = backwardmap[p]
b = np.binary_repr(p,N)
return sc, b
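# Usage sketch: hard-decide a noisy received sample back to its bit block,
# using the normalized backward map from ``qammap``:
#
#     _, _, _, bwn, _ = qammap(16)
#     sc, bits = findclosestanddecode(0.9 + 1.1j, bwn)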
# add cp to symbol sequence
def addcp(s,cplength):
last=s.size
start=last-cplength
scp=np.concatenate((s[start:last],s))
return(scp)
"""
Shortcut for converting an element into a list of N copies
"""
def makelist(arg,N):
if not(isinstance(arg,list)):
return([arg] * N)
else:
return(arg)
"""
DMT physical layer class
"""
def noise(t,f=None,psd=None):
"""
Add colored or white noise at the receiver
"""
if psd is None:
psd = lambda x: 1
    if not callable(psd):
        # interpolate tabulated psd values sampled at the supplied frequencies
        # f; capture them before f is rebound to the computed frequency axis
        fgrid, psdvals = f, psd
        psd = lambda x: np.interp(x, fgrid, psdvals)
f = calcfreqaxis(t)
H = psd(f)
Hf = np.sqrt(H)
r = np.random.randn(t.size)
R = np.fft.fft(r)
R = R * Hf
x = np.fft.fftshift( np.fft.ifft( np.fft.fftshift(R) ) )
return( np.real(x) )
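# Usage sketch (hypothetical psd shape): white vs. colored receiver noise.
#
#     t = np.arange(0.0, 1.0, 1e-4)
#     w = noise(t)                                         # flat (white) psd
#     c = noise(t, psd=lambda x: 1.0 / (1.0 + np.abs(x)))  # low-pass shaped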
class dmtphy:
    """
    DMT physical layer class
    """
class timings:
total = 0.0
exectimes = {}
def __init__(self,
nocarriers=16, # number of subcarrier channels
M=64, # QAM order
noframes=16, # frames to be considered
Df=1e6, # subcarrier spacing
cpsize=5, # size of CP prefix
samplespersymbol=40, # samples per symbol duration
tapsize=20, # tapsize for channel coefficients
psd = None, # noise psd,
trfcallable = None,
sampleoffset=0, # sampling offset at the receiver.
# 0 indicates no offset
# 0.5 Ts/2 offset
# 1 Ts offset
scales = None, # power scales for the carriers, sum of squares must add up to nocarriers
cliplevel = None, # clipping ratio
dacfilterbandwidth=None, # filter of the DAC
polynl = np.array([0, 1]) # nonlinearity polynomial coefficients
):
self.debug = False #change to true if we require debuging
self.timecode = False #change to true if you want to time the execution
'''
Transmitter characteristics
'''
self.ommitzero = True # ignore the zeroth subcarrier
if isinstance(M,int) or isinstance(M,float):
M = M * np.ones(nocarriers)
self.M = M.astype(int) # Modulation order for each subcarrier
self.bin = '' # input bits
self.cpsize = cpsize # size of cyclic prefix
self.noframes = int(2* round(noframes /2)) # number of DMT frames - must be an even number
self.nocarriers = nocarriers # number of carriers
self.t0 = 0 # time in the analog time axis where we assume that the
# frames start being transmitted
self.samplespersymbol = samplespersymbol # samples per symbol in the analog waveform
self.dacfilterbandwidth = None # filter bandwidth at the output of the DAC
self.framesbefore = 20 # guard period before frame transmission (empty frames)
self.framesafter = 20 # guard period after frame transmission (empty frames)
self.carriermodulation = 'qam' # modulation in carriers
self.forwardmaps = None # forward symbol map for carriers bits -->> symbols
self.backwardmaps = None # backward symbol map for carriers symbols -->> bits
        self.forwardmapsn = None # normalized forward symbol map for carriers bits -->> symbols
        self.backwardmapsn = None # normalized backward symbol map for carriers symbols -->> bits
self.sic = None # symbols assigned to carriers
self.sicun = None # unscaled symbols assigned to carriers
self.bic = None # bits assigned to carriers
self.txframeswithcp = None # frames at TX with CP
self.txs = None # output symbol sequence fed at the transmitter DAC
self.txsunclipped = None # unclipped waveform samples at the DAC output
self.analogtx = None # analog waveform at TX ourput
self.scales = scales
self.Df = Df # subcarrier spacing
self.Ts = 1.0/Df/(2.0*nocarriers) # sample duration
self.txinputifftframes = None # input blocks at IFFT input
self.txoutputifftframes = None # output blocks at IFFT input
self.removeimags = True # removes imaginary parts from IFFT output
self.framesamples = cpsize+2*nocarriers # samples per frame
self.Tframe = (cpsize+2*nocarriers)*self.Ts # duration of the DMT frames
self.Tsignal = self.Tframe*self.noframes # duration of the DMT signal (without guard periods)
        self.analogt = None # analog time axis
self.centertimeaxis = True # Center the analog time axis
self.analogtx = None # analog waveform at the output of the transmitter
self.analogtxspec = None # analog spectrum at the output of the transmitter
if scales is None:
self.scales = np.ones( self.nocarriers )
else:
self.scales = scales / np.sum( scales ) * self.nocarriers
if dacfilterbandwidth is None:
self.dacfilterbandwidth = 3.0/self.Ts
else:
self.dacfilterbandwidth = dacfilterbandwidth
self.normalizesymbols = True # normalize symbols so that the average energy is equal to one?
self.scalesforcarriers = None # scales required for symbol normalization
self.crestfactor = None # Crest factor of the transmitter samples
self.nobits = None # number of bits to be transmitted
self.cliplevel = cliplevel # Clipping level in dB
self.Aclip = None # Amplitude corresponding to the clipping level
self.DC = None # DC level at the transmitter
self.Amax = None # specified maximum signal amplitude at the transmitter after adding DC component
self.polynl = polynl # nonlinearity polynomial
'''
Channel characteristics
'''
self.taps=None # digital channel taps
self.tapssamplesperTs=100 # samples per sample duration when calculating the taps
self.tapsguardperiod=20 # defines guard period when calculating the taps
self.freqaxis=None # frequency axis
self.trf=None # transfer functon of the channel
self.ht=None # analog channel impulse response
self.tapsize=tapsize # number of taps
self.trfcallable = trfcallable # callable function for the transfer function of the channel
self.psd = psd # noise psd to be added at the receiver input
'''
Receiver Characteristics
'''
self.analogrx=None # analog waveform at the receiver input
self.analogrxspec=None # analog spectrum at the input of the receiver
self.rxs=None # samples at the input of the receiver
self.toffset=sampleoffset*self.Ts # time offset for sampling at the receiver
self.ts=None # times in which the analog receiver signal is sampled
self.rxsd=None # the samples at the input of the receiver calculated using the digital channel approach
self.rxframeswithcp=None # received DMT frames containing the cyclic prefix
self.rxinputfftframes=None # received DMT frames without the cyclic prefix
self.rxoutputfftframes=None # frames at the output of the FFT block
self.rxsic=None # symbols assigned to carriers
self.eqtaps=None # equalization taps. If None then simply use the inverse of the channel taps in the frequency domain
self.rxsic=None # symbols obtained at RX subcarrier channels
self.rxsicun=None # unscaled symbol estimates (original constellation)
self.siest=None # symbol estimates after hard decoding
self.rxbic=None # bit estimates at subcarriers
self.bout=None # bits obtained at the output of the receiver
self.berrors=None # bit errors
self.berrorsinc=None # bit errors in carrier channels
self.snr=None # Receive SNR at the various carrier channels
'''
Simulation Sequences
'''
# self.seqdig = ['setrandombits','setsymbolmaps','setcarriersymbols','calcifftinput',
# 'calcifftoutput','calccptxframes','calctxsymbols',
# 'cliptxsamples','normalizetxs','makedc','applytxnl','calctaps',
# 'applydigitalchannel','removeDC','calcrxframes',
# 'removecprxframes','calcfftoutput','calcrxcarriersamples',
# 'calcrxestimates','calcrxbits','calcerrors','calcsnrevm','calcber'
# ]
self.seqdig = ['setrandombits','setsymbolmaps','setcarriersymbols','calcifftinput',
'calcifftoutput','calccptxframes','calctxsymbols',
'cliptxsamples','normalizetxs','makedc','applytxnl','calctaps',
'applydigitalchannel','normalizerxs','calcrxframes','removeDC',
'removecprxframes','calcfftoutput','calcrxcarriersamples',
'calcrxestimates','calcrxbits','calcerrors','calcsnrevm','calcber'
]
self.seqanl = ['setrandombits','setsymbolmaps','setcarriersymbols','calcifftinput',
'calcifftoutput','calccptxframes','calctxsymbols',
'cliptxsamples','normalizetxs','makedc','applytxnl','calctaps','calctxwaveform','setchanneltrf',
'applyanalogchannel','calcadcoutput','removeDC','calcrxframes',
'removecprxframes','calcfftoutput','calcrxcarriersamples',
'calcrxestimates','calcrxbits','calcerrors','calcsnrevm'
]
# define the set of input bits, argument is a np array
def setinputbits(self,bi):
self.bin=bitstobitstr(bi)
# define the set of input bits, argument is a bit string
def setinputbitstr(self,bistr):
self.bin=bistr
def calcnumberofbits(self):
"""
Calculate number of bits to be transmitted
"""
# do we exclude the zeroth subcarrier?
if self.ommitzero:
bitsperframe = sum(np.log2(self.M[1:]).astype(int))
else:
bitsperframe = sum(np.log2(self.M).astype(int))
Nbits=bitsperframe*self.noframes
self.nobits = Nbits
# assign random bits corresponding to the required frames
def setrandombits(self):
self.calcnumberofbits()
bstr = ''.join(random.choice(['0','1']) for i in range(self.nobits))
self.setinputbitstr(bstr)
self.datarate = self.nobits / self.Tsignal
# set bits to carriers
def setcarrierbitstr(self,blockstr):
# check out dimensions of blockstr
blockpercarrier=len(blockstr[0])
# if we ommit the zeroth subcarrier then assign no bits to it
if self.ommitzero:
block2=[''] * blockpercarrier
blockstr2=[block2]
blockstr2.extend(blockstr)
else:
blockstr2=blockstr
self.bic=blockstr2
# read input bit sequence and assign symbol sequences to subcarriers - removes bits from input bit stream
def setbitstocarriers(self):
# check if we need to ommit the zeroth subcarrier
if self.ommitzero:
nobitspercarrier = np.log2(self.M[1:]).astype(int)
else:
nobitspercarrier = np.log2(self.M).astype(int)
# read the bits
blocks,bitsremaining,noframes=bitblockscyc2D(self.bin,nobitspercarrier)
# assign bit blocks to carriers
self.setcarrierbitstr(blocks)
def setsymbolmaps(self):
"""
Set up symbol maps for subcarriers
"""
self.backwardmaps = []
self.forwardmaps = []
self.backwardmapsn = []
self.forwardmapsn = []
self.scalesforcarriers = np.zeros( self.nocarriers )
for i in range(0,self.nocarriers):
fm,bm,fmn,bmn,s = qammap( self.M[i] )
self.backwardmaps.append( bm )
self.forwardmaps.append( fm )
self.backwardmapsn.append( bmn )
self.forwardmapsn.append( fmn )
self.scalesforcarriers[i] = s
# assign symbols to carriers by reading the input bits - removes bits from input bit stream
def setcarriersymbols(self,debug=False):
# assign bits to carriers
self.setbitstocarriers()
# create array for symbol storage.
self.sic = np.zeros([self.nocarriers,self.noframes]) + 1j * np.zeros([self.nocarriers,self.noframes])
self.sicun = np.zeros([self.nocarriers,self.noframes]) + 1j * np.zeros([self.nocarriers,self.noframes])
for nc in range(0,self.nocarriers):
blocks=self.bic[nc]
if debug:
print('Carrier: %d) has modulation order %d and blocks:' %(nc,self.M[nc]))
print(blocks)
for ib,block in enumerate(blocks):
# Check for subcarrier modulation
if self.carriermodulation == 'qam':
if block != '':
q = self.forwardmaps[nc][block]
qn = self.forwardmapsn[nc][block]
else:
q = 0
qn = 0
self.sic[nc,ib] = qn
self.sicun[nc,ib] = q
if debug:
print('Carrier %d,Block %d bit sequence %s corresponds to symbol %6.2f+j%6.2f' %(nc,ib,block,np.real(q),np.imag(q)))
if debug:
print('\n')
# calculate input frames to the ifft block of the transmitter
def calcifftinput(self):
self.txinputifftframes = []
for nf in range(0,self.noframes):
frame = self.sic[:,nf]
self.txinputifftframes.append( addconjugates( np.sqrt(self.scales) * frame ))
# calculate out frames of the ifft block at the transmitter
def calcifftoutput(self):
self.txoutputifftframes = []
for frame in self.txinputifftframes:
ifftout = np.fft.ifft ( frame )
if self.removeimags:
self.txoutputifftframes.append ( np.real (ifftout) )
else:
self.txoutputifftframes.append ( ifftout )
def calccptxframes(self):
"""
add cyclic prefix to frames
"""
self.txframeswithcp = []
for i,frame in enumerate(self.txoutputifftframes):
self.txframeswithcp.append(addcp(frame,self.cpsize))
def calctxsymbols(self):
"""
calculate output symbol sequence to be fed to the TX DAC
"""
self.txs=self.txframeswithcp[0]
if self.noframes > 0:
for i in range(1,self.noframes):
self.txs=np.concatenate((self.txs,self.txframeswithcp[i]))
self.powertx = np.mean( np.abs( self.txs ) ** 2.0 ) # power of the digital signal
def cliptxsamples(self):
"""
Clip the samples at the TX output
"""
if not (self.cliplevel is None):
s = self.powertx
R = 10.0 ** (self.cliplevel/10.0)
A = np.sqrt(R * s)
self.Aclip = A
i=np.where( np.abs(self.txs) > self.Aclip)
self.txsunclipped = np.copy(self.txs)
self.txs[i] = self.Aclip * np.sign( self.txs[i] )
def normalizetxs(self):
"""
Normalize transmitted samples so that they fall inside [-1, 1]
"""
self.txsu = np.copy(self.txs)
self.txs = self.txs / self.Aclip
def applytxnl(self):
"""
Apply nonlinearity polynomial at the transmitter side
"""
# linear version of the transmitted samples
self.txsl = np.copy(self.txs)
# apply nonlinearity
self.txs = polyval(self.txs, self.polynl)
def makedc(self):
"""
        Renders the output waveform to a DC waveform.
        Normalizes the output of the transmitter so that it corresponds to an
        output level determined by ``self.Amax``.
"""
self.txsac = np.copy(self.txs)
if self.Aclip is None:
            self.DC = np.max( np.abs(self.txs) )
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
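# Usage sketch: for the centrosymmetric group P -1 the reflection (1, 2, 3)
# maps to itself and to (-1, -2, -3), both with unit phase factors:
#
#     sg = space_groups['P -1']
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))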
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
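# space_groups is keyed both by International Tables number and by
# Hermann-Mauguin symbol; both keys point at the same SpaceGroup instance:
#
#   assert space_groups[12] is space_groups['C 1 2/m 1']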
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
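# Body-centered groups append each operation a second time shifted by
# (1/2, 1/2, 1/2).  The sums are stored unreduced: trans_num = [1,1,1] with
# trans_den = [2,2,1] above encodes (1/2, 1/2, 1), i.e. (1/2, 1/2, 0) modulo
# the lattice.  A hedged sketch of wrapping a translation into [0, 1)
# (hypothetical helper, assuming numpy-style arrays):
def _reduce_translation(trans_num, trans_den):
    # componentwise fraction, wrapped into the unit cell
    return (trans_num.astype(float) / trans_den) % 1.0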
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
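# Face-centered groups such as 'F d d 2' list each operation four times,
# shifted by the centering vectors (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and
# (1/2,1/2,0); the quarter translations of the d glides appear above as
# trans_den = [4,4,4].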
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
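# Centrosymmetric groups such as 'P m m m' carry twice as many operations:
# the second half of the list is the first half composed with the inversion
# diag(-1,-1,-1), which appears explicitly as the fifth entry above.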
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
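# The ':2' suffix in 'P n n n :2' denotes origin choice 2 of the
# International Tables (origin at a centre of inversion), which is why the
# pure inversion appears with zero translation in the list above.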
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
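# The ':2' suffix in symbols such as 'P m m n :2' follows the
# International Tables convention and marks origin choice 2 (origin at a
# centre of inversion).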
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
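# Face-centred (F) groups such as F m m m list each primitive operation
# four times, once per centring translation: (0,0,0), (0,1/2,1/2),
# (1/2,0,1/2) and (1/2,1/2,0); compare the four blocks of eight
# operations above.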
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
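# Usage sketch (assumption: how SpaceGroup applies an operation is
# defined elsewhere in this module): each group is registered under both
# its International Tables number and its Hermann-Mauguin symbol, so
# space_groups[75] and space_groups['P 4'] return the same object. A
# conventional reading of one entry is x' = rot*x + trans_num/trans_den.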
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
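# P 41 and P 43 form an enantiomorphic pair: the 4_1 screw couples the
# quarter turn with a translation of c/4 (trans_num/trans_den =
# [0,0,1]/[1,1,4]), while the 4_3 screw uses 3c/4 ([0,0,3]/[1,1,4]).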
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
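# In the body-centred (I) groups the second half of the operation list
# repeats the first half with the centring translation (1/2,1/2,1/2)
# added, as in the two blocks of four operations above.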
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
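# The remaining eight operations repeat the eight above, shifted by the
# I-centring vector (1/2, 1/2, 1/2). The generator does not reduce the
# resulting translations mod 1, so values such as (3/4, 5/4, 5/4) and
# (1/2, 1, 1/2) below are intentional, not typos.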
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
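# Lookup note (illustrative): the table is keyed both by International
# Tables number and by Hermann-Mauguin symbol, so these two lines
# retrieve the same SpaceGroup object:
#     sg = space_groups[89]
#     sg = space_groups['P 4 2 2']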
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
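# Note on the body-centred ('I ...') groups Nos. 119-122 above: each lists
# 16 operations, namely the 8 point-group operations twice, the second time
# with the centring vector (1/2, 1/2, 1/2) added to the translation. The
# generated sums are not reduced modulo 1, which is why values such as
# (1, 1/2, 5/4) appear in 'I -4 2 d' (= (1/2, 0, 3/4) + (1/2, 1/2, 1/2)).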
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
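# 'P 4/m m m' (No. 123) is the tetragonal holohedry: all 16 operations of
# the point group 4/mmm appear with zero translation, so this group has no
# screw axes or glide planes.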
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
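# The ':2' suffix here (and in Nos. 126, 129, 130, 133, 134, 137 below)
# denotes origin choice 2 of the International Tables, with the origin
# placed at a centre of inversion; these groups have two conventional
# origin settings.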
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
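# In the 'P 42/...' groups (Nos. 131-137, this one and those below) the
# fourfold operations carry a translation component of 1/2 along c, e.g.
# (0, 0, 1/2) above: they are 4_2 screw rotations rather than pure fourfold
# rotations.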
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
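# The ':2' suffix in the Hermann-Mauguin symbol marks origin choice 2 of
# the International Tables (origin at a centre of inversion); groups that
# have two conventional origins are stored in this block in that setting.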
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
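# Both the International Tables number and the Hermann-Mauguin symbol key
# the same SpaceGroup instance, so either form works for lookups:
assert space_groups[139] is space_groups['I 4/m m m']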
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
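# Space groups 143-167 below belong to the trigonal crystal system; the
# rhombohedral ones (R ...) are given in the hexagonal setting (':H').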
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
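# In the hexagonal setting, every rotation of a rhombohedral group is
# repeated once per R-centring translation, which can be read off the
# R 3 :H block above: (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3). A sketch
# of that expansion follows; 'generators' is a hypothetical input list of
# (rot, trans_num, trans_den) tuples, and the function is illustrative
# only -- the tables in this module are written out fully expanded.
def expand_rhombohedral(generators):
    centrings = [(N.array([0,0,0]), N.array([1,1,1])),
                 (N.array([1,2,2]), N.array([3,3,3])),
                 (N.array([2,1,1]), N.array([3,3,3]))]
    ops = []
    # centring-major order, matching the layout of the blocks above
    for cnum, cden in centrings:
        for rot, num, den in generators:
            # fraction addition num/den + cnum/cden, left unreduced;
            # e.g. (0,0,1)/(1,1,2) + (1,2,2)/(3,3,3) -> (1,2,7)/(3,3,6)
            ops.append((rot, num*cden + cnum*den, den*cden))
    return ops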
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
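# Optional sanity check (a sketch): composing any two operations of a
# group must reproduce a member of the same group modulo whole lattice
# translations, which is useful when editing these generated tables.
def _compose(op1, op2):
    rot1, num1, den1 = op1
    rot2, num2, den2 = op2
    rot = N.dot(rot1, rot2)
    # translation of the composite map: rot1*t2 + t1, wrapped into [0, 1)
    t = (N.dot(rot1, num2.astype(float) / den2)
         + num1.astype(float) / den1) % 1.0
    return rot, t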
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
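
# Space group 164: P -3 m 1 (trigonal)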
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
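
# Space group 165: P -3 c 1 (trigonal)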
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
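
# Space group 166: R -3 m :H (trigonal, rhombohedral lattice on hexagonal
# axes; note the +(1/3,2/3,2/3) and +(2/3,1/3,1/3) centering translations
# repeated for every rotational part).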
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
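
# Lookup works by either key: space_groups[166] is space_groups['R -3 m :H'].
# Space group 167: R -3 c :H (trigonal, hexagonal-axes setting)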
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
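
# Space group 168: P 6 -- first group of the hexagonal system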
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
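
# Space group 169: P 61 (6_1 screw axis: successive rotations translate by
# multiples of 1/6 along c)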
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
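
# Space group 170: P 65, the enantiomorph of P 61 (screw steps of 5/6
# instead of 1/6)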
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
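
# Space group 171: P 62 (6_2 screw, steps in thirds of c)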
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
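
# Space group 172: P 64, the enantiomorph of P 62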
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
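
# Space group 173: P 63 (6_3 screw, half-cell steps along c)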
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
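
# Space group 174: P -6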
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
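
# Space group 175: P 6/m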
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
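
# Space group 176: P 63/m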
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
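
# Space group 177: P 6 2 2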
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
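
# Space group 178: P 61 2 2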
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
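
# Space group 179: P 65 2 2 (enantiomorph of P 61 2 2)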
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
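
# Space group 180: P 62 2 2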
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
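
# Space group 181: P 64 2 2 (enantiomorph of P 62 2 2)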
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
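
# Space group 182: P 63 2 2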
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
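
# Space group 183: P 6 m m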
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
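
# Space group 184: P 6 c c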
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
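
# Space group 185: P 63 c m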
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
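
# Space group 186: P 63 m c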
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
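
# Space group 187: P -6 m 2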
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
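
# Space group 188: P -6 c 2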
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
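
# Space group 189: P -6 2 m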
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
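# Space group 190: P -6 2 c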
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
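# Space group 191: P 6/m m m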
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
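# Space group 192: P 6/m c c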
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
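# Space group 193: P 63/m c m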
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
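# Space group 194: P 63/m m c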
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
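# Space group 195: P 2 3 (first of the cubic space groups)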
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
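# Space group 196: F 2 3 (F-centred: the 12 rotations of P 2 3 recur with the
# centring translations (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0))
#
# Every operator block in this file follows the same five-line pattern.  As an
# illustrative sketch only (the helper name `symop` is hypothetical and is not
# defined or used anywhere in this module), each tuple could be built in one call:
#
#   def symop(rot_flat, trans_num=(0, 0, 0), trans_den=(1, 1, 1)):
#       rot = N.array(rot_flat)
#       rot.shape = (3, 3)          # 3x3 integer rotation matrix
#       return (rot, N.array(trans_num), N.array(trans_den))
#
#   # identity, then a 3-fold rotation about [111]:
#   transformations.append(symop([1,0,0, 0,1,0, 0,0,1]))
#   transformations.append(symop([0,0,1, 1,0,0, 0,1,0]))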
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
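# Space group 197: I 2 3 (body-centred: the 12 rotations recur with the
# centring translation (1/2, 1/2, 1/2))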
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
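# Space group 198: P 21 3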
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
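# Space group 199: I 21 3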
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
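# Space group 200: P m -3 (P 2 3 plus the inversion-related operators)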
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
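# Space group 201: P n -3 :2 (origin choice 2)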
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
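# Space group 202: F m -3 (name inferred from the standard numbering and the
# F-centring translations below; the closing SpaceGroup call follows the
# pattern above)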
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
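# Usage sketch (illustrative only, not executed): each entry of the list
# just registered is a (rot, trans_num, trans_den) triple -- a 3x3 integer
# rotation plus the fractional translation trans_num / trans_den. Applying
# the first operation of 'F m -3' to a fractional coordinate would look like:
#   rot0, num0, den0 = transformations[0]
#   pos = N.array([0.1, 0.2, 0.3])
#   image = N.dot(rot0, pos) + num0 * 1.0 / den0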
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
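# Note (assumption based on standard ITA naming): the ':2' suffix marks
# origin choice 2 (origin at a centre of inversion) for F d -3, which is why
# many operations above carry quarter-cell translations (denominators of 4).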
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
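# Structural check (illustrative, not part of the original table): the
# body-centring shift (1/2, 1/2, 1/2) duplicates the 24 point-group
# operations of m-3, giving 48 entries in total.
assert len(transformations) == 48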
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(207, 'P 4 3 2', transformations)
space_groups[207] = sg
space_groups['P 4 3 2'] = sg
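# Consistency check (illustrative, not part of the original table): the
# point group 432 has order 24 and this primitive setting carries no
# centring translations, so exactly 24 operations were registered above.
assert len(transformations) == 24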
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(208, 'P 42 3 2', transformations)
space_groups[208] = sg
space_groups['P 42 3 2'] = sg
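# Note: in 'P 42 3 2' the fourfold operations are 4_2 screw rotations, which
# is why they carry the (1/2, 1/2, 1/2) translations above while the
# threefold operations translate by zero.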
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import json
import numpy as np
import random
import os
import contextlib
import warnings
# from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from xbbo.utils.constants import MAXINT
warnings.filterwarnings("ignore", category=DeprecationWarning)
from sklearn.cluster import KMeans
from scipy.stats import norm
import copy as cp
from sklearn.svm import SVC
from sklearn.linear_model import Ridge
from torch.quasirandom import SobolEngine
from mpl_toolkits.mplot3d import axes3d, Axes3D
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, Matern
import cma
import matplotlib.pyplot as plt
from matplotlib import cm
# from .turbo_1.turbo_1 import Turbo1
class WrappedSVC(SVC):
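    """Thin SVC wrapper whose 0/1 predictions can be inverted via the
    `flip_predictions` flag, so that label 0 consistently denotes the
    good region after cluster labels have been corrected."""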
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.flip_predictions = False
def predict(self, X):
preds = super().predict(X)
if self.flip_predictions:
return 1 - preds
else:
return preds
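# Usage sketch for WrappedSVC (illustrative only):
#   clf = WrappedSVC(kernel='rbf', gamma='auto')
#   clf.fit(X, y)
#   clf.flip_predictions = True   # predict() now returns 1 - SVC.predict(X)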
# The inputs to this classifier are samples (evaluated points).
class Classifier():
def __init__(self,
real_samples,
samples,
sample_dims,
split_dims,
true_dims,
kernel_type,
cmaes_sigma_mult,
leaf_size,
splitter_type,
split_metric='max',
use_gpr=True,
gamma_type="auto",
normalize=True,
verbose=False,
split_use_predict=True,
rng=np.random.RandomState(42),
dynamic_C=True,
**kwargs):
self.training_counter = 0
assert sample_dims >= 1
assert split_dims >= 1
assert true_dims >= 1
assert type(samples) == type([])
assert len(real_samples) == len(samples)
# self.args = args
self.sample_dims = sample_dims
self.split_dims = split_dims
self.true_dims = true_dims
self.kernel_type = kernel_type
self.gamma_type = gamma_type
self.split_metric = split_metric
self.normalize = normalize
self.split_use_predict = split_use_predict
self.dynamic_C = dynamic_C
#create a gaussian process regressor
noise = 0.1
m52 = ConstantKernel(1.0) * Matern(length_scale=1.0, nu=2.5)
self.gpr = GaussianProcessRegressor(kernel=m52,
alpha=noise**2) #default to CPU
self.cmaes_sigma_mult = cmaes_sigma_mult
self.LEAF_SAMPLE_SIZE = leaf_size
self.rng = rng
self.splitter_type = splitter_type
if self.normalize:
if self.splitter_type == 'kmeans':
self.kmean = Pipeline([('sc', StandardScaler()),('cluster',KMeans(n_clusters=2, random_state=self.rng))])
self.svm = Pipeline([('sc', StandardScaler()),('clf',WrappedSVC(kernel=kernel_type,
gamma=gamma_type,
random_state=self.rng))])
#learned boundary
elif self.splitter_type == 'linreg':
self.regressor = Pipeline([('sc', StandardScaler()),('reg',Ridge())])
elif self.splitter_type == 'value':
self.svm = Pipeline([('sc', StandardScaler()),('clf',WrappedSVC(kernel=kernel_type,
gamma=gamma_type,
random_state=self.rng))])
else:
raise NotImplementedError
else:
if self.splitter_type == 'kmeans':
self.kmean = Pipeline([('cluster',KMeans(n_clusters=2, random_state=self.rng))])
self.svm = Pipeline([('clf',WrappedSVC(kernel=kernel_type,
gamma=gamma_type,
random_state=self.rng))])
#learned boundary
elif self.splitter_type == 'linreg':
self.regressor = Pipeline([('clf',Ridge())])
elif self.splitter_type == 'value':
self.svm = Pipeline([('clf',WrappedSVC(kernel=kernel_type,
gamma=gamma_type,
random_state=self.rng))])
else:
raise NotImplementedError
#data structures to store
# self.real_samples = []
# self.samples = []
# self.X = np.array([])
# self.real_X = np.array([])
# self.fX = np.array([])
self.sample_X = np.array([])
self.split_X = np.array([])
self.true_X = np.array([])
self.fX = np.array([])
self.svm_label = None
#good region is labeled as zero
#bad region is labeled as one
self.good_label_mean = -1
self.bad_label_mean = -1
self.update_samples([], [], [], [])
self.use_gpr = use_gpr
self.verbose = verbose
def correct_classes(self, svm_label):
# the 0-1 labels in kmean can be different from the actual
# flip the label is not consistent
# 0: good cluster, 1: bad cluster
self.good_label_metric, self.bad_label_metric = self.get_cluster_metric(
svm_label) # mean by default
if self.bad_label_metric > self.good_label_metric:
for idx in range(0, len(svm_label)):
if svm_label[idx] == 0:
svm_label[idx] = 1
else:
svm_label[idx] = 0
self.svm.steps[-1][-1].flip_predictions = True
self.good_label_metric, self.bad_label_metric = self.get_cluster_metric(
svm_label)
return svm_label
def is_splittable_svm(self):
try:
if self.splitter_type in ['kmeans', 'value']:
plabel = self.learn_clusters()
if plabel.min() == plabel.max():
print('Warning: only 1 cluster')
return False
self.learn_boundary(plabel)
svm_label = self.svm.predict(self.split_X)
# TODO whether is needed
for i in range(10):
if len(np.unique(svm_label)) > 1:
# plabel = svm_label
break
else:
if not self.dynamic_C:
return False
self.svm = WrappedSVC(
C=10**(i + 1),
kernel=self.kernel_type,
gamma=self.gamma_type,
random_state=self.rng
) # retry with less regularization
self.learn_boundary(plabel)
svm_label = self.svm.predict(self.split_X)
if i == 9:
print(
'Warning: svm split failed, using base plabel for splitting'
)
if len(np.unique(svm_label)) == 1:
return False
else:
if self.split_use_predict:
svm_label = self.correct_classes(svm_label)
else:
svm_label = plabel.copy()
self.svm_label = svm_label # save these for reuse later
return True
else:
return True # the check for node size happens elsewhere
# svm_label = (self.regressor.predict(self.X) > self.regressor_threshold).astype(int)
except:
return False # very rare exception sometimes, idk why
def get_max(self):
return np.max(self.fX)
def get_mean(self):
return np.mean(self.fX)
def get_metric(self):
return self.get_max() if self.split_metric == 'max' else self.get_mean(
)
def plot_samples_and_boundary(self, func, name):
assert func.dims == 2
plabels = self.svm.predict(self.split_X)
good_counts = len(self.split_X[np.where(plabels == 0)])
bad_counts = len(self.split_X[np.where(plabels == 1)])
good_mean = np.mean(self.fX[np.where(plabels == 0)])
bad_mean = np.mean(self.fX[np.where(plabels == 1)])
if np.isnan(good_mean) == False and np.isnan(bad_mean) == False:
assert good_mean > bad_mean
lb = func.lb
ub = func.ub
x = np.linspace(lb[0], ub[0], 100)
y = np.linspace(lb[1], ub[1], 100)
xv, yv = np.meshgrid(x, y)
true_y = []
for row in range(0, xv.shape[0]):
for col in range(0, xv.shape[1]):
x = xv[row][col]
y = yv[row][col]
true_y.append(func(np.array([x, y])))
true_y = np.array(true_y)
if self.splitter_type == 'kmeans':
pred_labels = self.svm.predict(np.c_[xv.ravel(), yv.ravel()])
elif self.splitter_type == 'linreg':
raise NotImplementedError # TODO if we need this later
pred_labels = pred_labels.reshape(xv.shape)
fig, ax = plt.subplots()
ax.contour(xv, yv, true_y.reshape(xv.shape), cmap=cm.coolwarm)
ax.contourf(xv, yv, pred_labels, alpha=0.4)
ax.scatter(self.split_X[np.where(plabels == 0), 0],
self.split_X[np.where(plabels == 0), 1],
marker='x',
label="good-" + str(np.round(good_mean, 2)) + "-" +
str(good_counts))
ax.scatter(self.split_X[np.where(plabels == 1), 0],
self.split_X[np.where(plabels == 1), 1],
marker='x',
label="bad-" + str(np.round(bad_mean, 2)) + "-" +
str(bad_counts))
ax.legend(loc="best")
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_xlim([-10, 10])
ax.set_ylim([-10, 10])
plt.savefig(name)
plt.close()
def update_samples(self, latest_latent_samples, latest_split,
latest_true_samples, latest_returns):
# assert type(latest_samples) == type([])
# real_X = []
# X = []
# fX = []
# for sample in latest_real_samples:
# real_X.append(sample[0])
# for sample in latest_samples:
# if self.args.final_obs_split:
# self.split_dims = np.prod(sample[3].shape)
# X.append(sample[3]) # final obs
# else:
# X.append( sample[0] )
# fX.append( sample[1] )
self.sample_X = np.asarray(latest_latent_samples,
dtype=np.float32).reshape(
-1, self.sample_dims)
self.split_X = np.asarray(latest_split, dtype=np.float32).reshape(
-1, self.split_dims)
self.true_X = np.asarray(latest_true_samples,
dtype=np.float32).reshape(-1, self.true_dims)
self.fX = np.asarray(latest_returns, dtype=np.float32).reshape(-1)
assert self.sample_X.shape[0] == self.split_X.shape[
0] == self.true_X.shape[0] == self.fX.shape[0]
# self.samples = latest_samples
# self.real_samples = latest_real_samples
self.svm_label = None
def train_gpr(self, latent_samples, f_samples, samples=None):
# X = []
# fX = []
# for sample in latent_samples:
# X.append( sample[0] )
# fX.append( sample[1] )
X = np.asarray(latent_samples).reshape(-1, self.sample_dims)
fX = np.asarray(f_samples).reshape(-1)
# print("training GPR with ", len(X), " data X")
self.gpr.fit(X, fX)
###########################
# BO sampling with EI
###########################
def expected_improvement(self, X, xi=0.0001, use_ei=True):
        ''' Computes the EI at points X based on existing samples X_sample and
        Y_sample using a Gaussian process surrogate model.
        Args: X: Points at which EI shall be computed (m x d).
        X_sample: Sample locations (n x d). Y_sample: Sample values (n x 1).
        gpr: A GaussianProcessRegressor fitted to samples.
        xi: Exploitation-exploration trade-off parameter.
        Returns: Expected improvements at points X. '''
X_sample = self.true_X
Y_sample = self.fX.reshape((-1, 1))
gpr = self.gpr
mu, sigma = gpr.predict(X, return_std=True)
if not use_ei:
return mu
else:
#calculate EI
mu_sample = gpr.predict(X_sample)
sigma = sigma.reshape(-1, 1)
mu_sample_opt = np.max(mu_sample)
with np.errstate(divide='warn'):
imp = mu - mu_sample_opt - xi
imp = imp.reshape((-1, 1))
Z = imp / sigma
ei = imp * norm.cdf(Z) + sigma * norm.pdf(Z)
ei[sigma == 0.0] = 0.0
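            # For reference, the acquisition computed above is
            #   EI(x) = (mu(x) - f_best - xi) * Phi(Z) + sigma(x) * phi(Z),
            # with Z = (mu - f_best - xi) / sigma and Phi/phi the standard
            # normal CDF/PDF; EI is set to 0 wherever sigma == 0.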
return ei
def plot_boundary(self, X):
if X.shape[1] > 2:
return
fig, ax = plt.subplots()
ax.scatter(X[:, 0], X[:, 1], marker='.')
ax.scatter(self.true_X[:, 0], self.true_X[:, 1], marker='x')
ax.set_xlim([-10, 10])
ax.set_ylim([-10, 10])
plt.savefig("boundary.pdf")
plt.close()
def get_sample_ratio_in_region(self, cands, path):
total = len(cands)
for node in path:
if len(cands) == 0:
return 0, np.array([])
assert len(cands) > 0
if node[0].classifier.splitter_type in ['kmeans', 'value']:
boundary = node[0].classifier.svm
cands = cands[boundary.predict(cands) == node[1]]
elif node[0].classifier.splitter_type == 'linreg':
cands = cands[(
node[0].classifier.regressor.predict(cands) <= node[0].
classifier.regressor_threshold).astype(int) == node[1]]
# node[1] store the direction to go
ratio = len(cands) / total
assert len(cands) <= total
return ratio, cands
def propose_rand_samples_probe(self, nums_samples, path, lb, ub):
seed = self.rng.randint(int(1e6))
sobol = SobolEngine(dimension=self.dims, scramble=True, seed=seed)
center = np.mean(self.true_X, axis=0)
#check if the center located in the region
ratio, tmp = self.get_sample_ratio_in_region(
np.reshape(center, (1, len(center))), path)
if ratio == 0:
if self.verbose:
print("==>center not in the region, using random samples")
return self.propose_rand_samples(nums_samples, lb, ub)
# it is possible that the selected region has no points,
# so we need check here
axes = len(center)
final_L = []
for axis in range(0, axes):
L = np.zeros(center.shape)
L[axis] = 0.01
ratio = 1
while ratio >= 0.9:
L[axis] = L[axis] * 2
if L[axis] >= (ub[axis] - lb[axis]):
break
lb_ = np.clip(center - L / 2, lb, ub)
ub_ = np.clip(center + L / 2, lb, ub)
cands_ = sobol.draw(10000).to(
dtype=torch.float64).cpu().detach().numpy()
cands_ = (ub_ - lb_) * cands_ + lb_
ratio, tmp = self.get_sample_ratio_in_region(cands_, path)
final_L.append(L[axis])
final_L = np.array(final_L)
lb_ = np.clip(center - final_L / 2, lb, ub)
ub_ = np.clip(center + final_L / 2, lb, ub)
if self.verbose:
print("center:", center)
print("final lb:", lb_)
print("final ub:", ub_)
count = 0
cands = np.array([])
while len(cands) < 10000:
count += 10000
cands = sobol.draw(count).to(
dtype=torch.float64).cpu().detach().numpy()
cands = (ub_ - lb_) * cands + lb_
ratio, cands = self.get_sample_ratio_in_region(cands, path)
samples_count = len(cands)
#extract candidates
return cands
def propose_rand_samples_sobol(self, nums_samples, path, lb, ub):
#rejected sampling
selected_cands = np.zeros((1, self.sample_dims))
seed = self.rng.randint(int(1e6))
sobol = SobolEngine(dimension=self.sample_dims,
scramble=True,
seed=seed)
# scale the samples to the entire search space
# ----------------------------------- #
# while len(selected_cands) <= nums_samples:
# cands = sobol.draw(100000).to(dtype=torch.float64).cpu().detach().numpy()
# cands = (ub - lb)*cands + lb
# for node in path:
# boundary = node[0].classifier.svm
# if len(cands) == 0:
# return []
# cands = cands[ boundary.predict(cands) == node[1] ] # node[1] store the direction to go
# selected_cands = np.append( selected_cands, cands, axis= 0)
# print("total sampled:", len(selected_cands) )
# return cands
# ----------------------------------- #
#shrink the cands region
ratio_check, centers = self.get_sample_ratio_in_region(
self.true_X, path)
# no current samples located in the region
# should not happen
# print("ratio check:", ratio_check, len(self.X) )
# assert ratio_check > 0
if ratio_check == 0 or len(centers) == 0:
return self.propose_rand_samples(nums_samples, lb, ub)
lb_ = None
ub_ = None
final_cands = []
for center in centers:
center = self.true_X[self.rng.randint(len(self.true_X))]
if self.use_gpr:
cands = sobol.draw(2000).to(
dtype=torch.float64).cpu().detach().numpy()
else: # just taking random samples, not reranking by expected improvement, so don't need as many
cands = sobol.draw(20).to(
dtype=torch.float64).cpu().detach().numpy()
ratio = 1
L = 0.0001
Blimit = np.max(ub - lb)
while ratio == 1 and L < Blimit:
lb_ = np.clip(center - L / 2, lb, ub)
ub_ = np.clip(center + L / 2, lb, ub)
cands_ = cp.deepcopy(cands)
cands_ = (ub_ - lb_) * cands_ + lb_
ratio, cands_ = self.get_sample_ratio_in_region(cands_, path)
if ratio < 1:
final_cands.extend(cands_.tolist())
L = L * 2
final_cands = np.array(final_cands)
if len(final_cands) > nums_samples:
final_cands_idx = self.rng.choice(len(final_cands), nums_samples)
return final_cands[final_cands_idx]
else:
if len(final_cands) == 0:
return self.propose_rand_samples(nums_samples, lb, ub)
else:
return final_cands
def propose_samples_rs(self,
latent_samples=None,
f_samples=None,
nums_samples=10,
path=None,
lb=None,
ub=None,
samples=None):
        ''' Proposes the next sampling points by drawing Sobol candidates
        inside the selected region and picking `nums_samples` of them
        uniformly at random (no acquisition function is used here).
        Args: path: list of (node, direction) pairs defining the region.
        lb, ub: lower/upper bounds of the search space.
        Returns: Proposed sampling locations. '''
assert path is not None and len(path) >= 0
assert lb is not None and ub is not None
assert samples is not None and len(samples) > 0
nums_rand_samples = 10000
if len(path) == 0:
return self.propose_rand_samples(nums_samples, lb, ub)
X = self.propose_rand_samples_sobol(nums_rand_samples, path, lb, ub)
# print("samples in the region:", len(X) )
# self.plot_boundary(X)
if len(X) == 0:
print('Warning: len X is 0 in propose_samples_bo')
return self.propose_rand_samples(nums_samples, lb, ub)
proposed_X = X[self.rng.choice(len(X),
size=nums_samples,
replace=False)]
return proposed_X
def propose_samples_bo(self,
latent_samples=None,
f_samples=None,
nums_samples=10,
path=None,
lb=None,
ub=None,
samples=None):
''' Proposes the next sampling point by optimizing the acquisition function.
Args: acquisition: Acquisition function. X_sample: Sample locations (n x d).
Y_sample: Sample values (n x 1). gpr: A GaussianProcessRegressor fitted to samples.
Returns: Location of the acquisition function maximum. '''
assert path is not None and len(path) >= 0
assert lb is not None and ub is not None
assert samples is not None and len(samples) > 0
nums_rand_samples = 10000
if len(path) == 0:
return self.propose_rand_samples(nums_samples, lb, ub)
X = self.propose_rand_samples_sobol(nums_rand_samples, path, lb, ub)
# print("samples in the region:", len(X) )
# self.plot_boundary(X)
if len(X) == 0:
print('Warning: len X is 0 in propose_samples_bo')
return self.propose_rand_samples(nums_samples, lb, ub)
if self.use_gpr:
self.train_gpr(latent_samples, f_samples,
samples) # learn in unit cube
X_ei = self.expected_improvement(X, xi=0.001, use_ei=True)
row, col = X.shape
X_ei = X_ei.reshape(len(X))
n = nums_samples
if X_ei.shape[0] < n:
n = X_ei.shape[0]
indices = np.argsort(X_ei)[-n:]
proposed_X = X[indices]
else:
# np.random.shuffle(X)
perm = self.rng.permutation(len(X))
proposed_X = X[perm][:nums_samples]
return proposed_X
###########################
# sampling with turbo
###########################
# version 1: select a partition, perform one-time turbo search
def propose_samples_turbo(self, num_samples, path, func):
#throw a uniform sampling in the selected partition
X_init = self.propose_rand_samples_sobol(30, path, func.lb, func.ub)
#get samples around the selected partition
print("sampled ", len(X_init), " for the initialization")
turbo1 = Turbo1(
f=func, # Handle to objective function
lb=func.lb, # Numpy array specifying lower bounds
ub=func.ub, # Numpy array specifying upper bounds
n_init=30, # Number of initial bounds from an Latin hypercube design
max_evals=num_samples, # Maximum number of evaluations
batch_size=1, # How large batch size TuRBO uses
verbose=True, # Print information from each batch
use_ard=True, # Set to true if you want to use ARD for the GP kernel
max_cholesky_size=2000, # When we switch from Cholesky to Lanczos
n_training_steps=50, # Number of steps of ADAM to learn the hypers
min_cuda=1024, # Run on the CPU for small datasets
device="cuda"
if torch.cuda.is_available() else "cpu", # "cpu" or "cuda"
dtype="float32", # float64 or float32
X_init=X_init,
)
proposed_X, fX = turbo1.optimize()
fX = fX * -1
return proposed_X, fX
###########################
# sampling with CMA-ES
###########################
def propose_samples_cmaes(self, num_samples, path, init_within_leaf, lb,
ub):
# print('len self.X', len(self.X))
if len(
self.sample_X
) > num_samples: # since we're adding more samples as we go, start from the best few
best_indices = sorted(list(range(len(self.sample_X))),
key=lambda i: self.fX[i],
reverse=True)
tell_X, tell_fX = np.stack(
[
self.sample_X[i] for i in
best_indices[:max(self.LEAF_SAMPLE_SIZE, num_samples)]
],
axis=0), np.stack([
-self.fX[i] for i in
best_indices[:max(self.LEAF_SAMPLE_SIZE, num_samples)]
],
axis=0)
else:
tell_X, tell_fX = self.sample_X, np.array([-fx for fx in self.fX])
if init_within_leaf == 'mean':
x0 = np.mean(tell_X, axis=0)
elif init_within_leaf == 'random':
x0 = random.choice(tell_X)
elif init_within_leaf == 'max':
x0 = tell_X[tell_fX.argmax()] # start from the best
else:
raise NotImplementedError
range_ = np.mean(0.1 * (ub - lb))
sigma0 = np.mean(
[np.std(tell_X[:, i]) for i in range(tell_X.shape[1])])
        sigma0 = max(range_, sigma0)  # clamp to at least range_ (a tenth of the mean span)
# sigma0 = min(1, sigma0)
sigma0 *= self.cmaes_sigma_mult
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
es = cma.CMAEvolutionStrategy(x0,
sigma0,
inopts={
'maxfevals': num_samples,
'popsize': max(2, len(tell_X)),
'seed': self.rng.randint(MAXINT),
'bounds': [lb.mean(),
ub.mean()]
})
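        # ask/tell protocol: `ask` draws a candidate population from the
        # current CMA-ES search distribution; `tell` updates the distribution's
        # mean and covariance from the evaluated (minimization) fitness values.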
num_evals = 0
proposed_X = []
init_X = es.ask()
if len(tell_X) < 2:
pad_X = init_X[:2 - len(tell_X)]
proposed_X += pad_X
num_evals += 2 - len(tell_X)
else:
es.tell(tell_X, tell_fX)
while num_evals < num_samples:
new_X = es.ask()
if num_evals + len(new_X) > num_samples:
self.rng.shuffle(new_X)
new_X = new_X[:num_samples - num_evals]
else:
pass
proposed_X += new_X
num_evals += len(new_X)
assert num_evals == num_samples
return proposed_X
###########################
# sampling with gradient
###########################
def propose_samples_gradient(self, num_samples, path, func,
init_within_leaf, step):
# print('len self.X', len(self.X))
if len(
self.true_X
) > num_samples: # since we're adding more samples as we go, start from the best few
best_indices = sorted(list(range(len(self.true_X))),
key=lambda i: self.fX[i],
reverse=True)
tell_X, tell_fX = np.stack(
[
self.true_X[i] for i in
best_indices[:max(self.LEAF_SAMPLE_SIZE, num_samples)]
],
axis=0), np.stack([
-self.fX[i] for i in
best_indices[:max(self.LEAF_SAMPLE_SIZE, num_samples)]
],
axis=0)
else:
tell_X, tell_fX = self.true_X, np.array([-fx for fx in self.fX])
if init_within_leaf == 'mean':
assert num_samples == 1
x0 = [np.mean(tell_X, axis=0)]
elif init_within_leaf == 'random':
indices = list(range(len(tell_X)))
random.shuffle(indices)
x0 = [tell_X[indices[i]] for i in range(num_samples)]
elif init_within_leaf == 'max':
indices = list(range(len(tell_X)))
indices = sorted(indices, key=lambda i: tell_fX[i], reverse=True)
x0 = [tell_X[indices[i]] for i in range(num_samples)]
proposed_X = func.latent_converter.improve_samples(x0,
func.env._get_obs(),
step=step)
# proposed_X = func.latent_converter.improve_samples(x0, func.env.get_obs(), step=step)
new_fX = [func(x) for x in proposed_X]
return proposed_X, [-fx for fx in new_fX]
###########################
# random sampling
###########################
def propose_rand_samples(self, nums_samples, lb, ub):
x = self.rng.uniform(lb, ub, size=(nums_samples, self.dims))
return x
def propose_samples_rand(self, nums_samples=10):
return self.propose_rand_samples(nums_samples, self.lb, self.ub)
###########################
# learning boundary
###########################
def get_cluster_metric(self, plabel):
assert plabel.shape[0] == self.fX.shape[0]
zero_label_fX = []
one_label_fX = []
for idx in range(0, len(plabel)):
if plabel[idx] == 0:
zero_label_fX.append(self.fX[idx])
elif plabel[idx] == 1:
one_label_fX.append(self.fX[idx])
else:
print(
"kmean should only predict two clusters, Classifiers.py:line73"
)
os._exit(1)
if self.split_metric == 'mean':
            good_label_mean = np.mean(np.array(zero_label_fX))
            bad_label_mean = np.mean(np.array(one_label_fX))
        else:  # split_metric == 'max'
            good_label_mean = np.max(np.array(zero_label_fX))
            bad_label_mean = np.max(np.array(one_label_fX))
        return good_label_mean, bad_label_mean
import numpy as np
a = np.array([[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
print(a)
print()
# accessing elements of a numpy array
b = a[1:3, 1:4]  # take rows 1-2 and columns 1-3
print(b)
print()
c = a[3, :]
print(c)
# 1) list indexing
a = np.arange(12)
print(a)
b = a[[2, 5, 7]]  # take the elements at indices 2, 5 and 7; the index may also be an ndarray, as below
print(b)
# pulling elements generically with a list index
a = np.arange(12)
print(a)
i = np.array([2, 5, 7, 9])
b = a[i]
print(b)
# 2) Ellipsis indexing: an ellipsis stands in for all remaining axes, which is convenient when the array has more than two dimensions
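# a hedged example of Ellipsis indexing (the shape is chosen for illustration):
t = np.arange(24).reshape(2, 3, 4)
print(t[..., 0])  # same as t[:, :, 0]: every leading axis, first entry of the last axis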
# 3) boolean indexing: an array can be indexed with a True/False mask of the same length to operate on the selected elements
a = np.array([10, 20, 30, 40, 50])
b = np.array([True, True, False, True, False], dtype='bool')
c = a[b]
print(c)
# Note: applying a comparison operator to a numpy array evaluates the comparison element-wise and yields a boolean array
a = np.array([18, 40, 20, 80, 50])
b = a[a > 30]  # boolean indexing is a very common idiom
print(b)
# keep students and their grades in two numpy arrays, then pull the students whose grade is at least 50
a = np.array(['ali', 'veli', 'selami', 'ayşe', 'fatma'])
b = np.array([10, 79, 20, 45, 70])
c = a[b >= 50]  # condition: grade of at least 50
print(c)
# summing a boolean vector counts each True as 1 and each False as 0
a = np.array([10, 79, 20, 45, 70, 91, 68])
result = np.sum(a > 30)
print(result)
# take the values greater than or equal to the mean
a = np.array([10, 79, 20, 45, 70, 91, 68])
result = a[a >= np.mean(a)]
print(result)
# take the values within a given range
a = np.array([10, 79, 20, 45, 70, 91, 68])
result = a[(a > 30) & (a < 80)]
print(result)
"""
a = np.array([10, 79, 20, 45, 70, 91, 68])
result = a[np.logical_and(a > 30, a < 80)]
print(result)
"""
# view: slicing does not carve out an independent object from the original array.
# the object b is affected by changes to the original array, and changes made through b propagate back to it.
# also note that in numpy only slicing operations create views; other operations return copies.
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(a)
print()
b = a[1:3, 0:2]
print(b)
print()
a[1, 1] = 100
print(b)
b[0, 0] = 50
print(a)
# how to change the shape of an ndarray:
a = np.arange(12)
b = np.reshape(a, (4, 3))  # a 4-row, 3-column matrix; reshape does not return an independent object, it returns a view
print(a)
print()
print(b)
""" the same reshape written as an ndarray method
a = np.arange(12)
b = a.reshape((2, 6))
print(a)
print(b)
"""
# create a np array and reshape it in one chained method call
a = np.arange(12).reshape(3, 4)
print(a)
# to reshape an already reshaped matrix into new dimensions, first think of it as a one-dimensional array, then reshape that flat array into the new dimensions
# the global resize function: resize builds a new object by copying, without creating a view
a = np.array([10, 20, 30, 40, 50, 60])
b = np.resize(a, (3, 2))
print(a)
print('-----------')
print(b)
a[0] = 100
print('-----------')
print(a)
print('-----------')
print(b)
# resize is also available as an ndarray method; that form modifies the existing object in place, no copy is created
a = np.array([10, 20, 30, 40, 50, 60])
a.resize(3, 2)
print(a)
print('-----------')
# flatten turns a reshaped array back into one dimension, laying the rows out end to end
a = np.array([[10, 20, 30], [40, 50, 60]])
b = a.flatten()
print(b)
# transpose exists both as a global function and as a method; it also produces a view, so changes through b affect a
a = np.arange(12).reshape(4, 3)
print(a)
print('------')
b = np.transpose(a)
print(b)
print('------')
c = b.transpose()
print(c)
print('------')
# the global vstack and hstack functions:
# 1) vstack: stacks n row vectors vertically into one new object; here three independent ndarrays are combined
x = np.array([1, 2, 3])
y = np.array([10, 20, 30])
z = np.array([100, 200, 300])
a = np.vstack((x, y, z))
print(a)
# 2) hstack: stacks horizontally; to stack column by column, the inputs must be column vectors
x = np.array([[1], [2], [3]])
y = np.array([[10], [20], [30]])
z = np.array([[100], [200], [300]])
a = np.hstack((x, y, z))
print(a)
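# a hedged alternative: np.column_stack treats 1-D inputs as columns directly,
# so the explicit column vectors above are not required
cs = np.column_stack((np.array([1, 2, 3]), np.array([10, 20, 30])))
print(cs)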
"""
image_retrieval.py (author: <NAME> / git: ankonzoid)
We perform image retrieval using transfer learning on a pre-trained
VGG image classifier. We plot the k=5 most similar images to our
query images, as well as the t-SNE visualizations.
"""
import os
import numpy as np
import tensorflow as tf
from sklearn.neighbors import NearestNeighbors
from src.CV_IO_utils import read_imgs_dir
from src.CV_transform_utils import apply_transformer
from src.CV_transform_utils import resize_img, normalize_img
from src.CV_plot_utils import plot_query_retrieval, plot_tsne, plot_reconstructions
from src.autoencoder import AutoEncoder
# Run mode: (autoencoder -> simpleAE, convAE) or (transfer learning -> vgg19)
modelName = "convAE" # try: "simpleAE", "convAE", "vgg19"
trainModel = True
parallel = True # use multicore processing
# Make paths
dataTrainDir = os.path.join(os.getcwd(), "data", "train")
dataTestDir = os.path.join(os.getcwd(), "data", "test")
outDir = os.path.join(os.getcwd(), "output", modelName)
if not os.path.exists(outDir):
os.makedirs(outDir)
# Read images
extensions = [".jpg", ".jpeg"]
print("Reading train images from '{}'...".format(dataTrainDir))
imgs_train = read_imgs_dir(dataTrainDir, extensions, parallel=parallel)
print("Reading test images from '{}'...".format(dataTestDir))
imgs_test = read_imgs_dir(dataTestDir, extensions, parallel=parallel)
shape_img = imgs_train[0].shape
print("Image shape = {}".format(shape_img))
# Build models
if modelName in ["simpleAE", "convAE"]:
# Set up autoencoder
info = {
"shape_img":
shape_img,
"autoencoderFile":
        os.path.join(outDir, "{}_autoencoder.h5".format(modelName)),
"encoderFile":
os.path.join(outDir, "{}_encoder.h5".format(modelName)),
"decoderFile":
os.path.join(outDir, "{}_decoder.h5".format(modelName)),
}
model = AutoEncoder(modelName, info)
model.set_arch()
if modelName == "simpleAE":
shape_img_resize = shape_img
input_shape_model = (model.encoder.input.shape[1], )
output_shape_model = (model.encoder.output.shape[1], )
n_epochs = 300
elif modelName == "convAE":
shape_img_resize = shape_img
input_shape_model = tuple(
[int(x) for x in model.encoder.input.shape[1:]])
output_shape_model = tuple(
[int(x) for x in model.encoder.output.shape[1:]])
n_epochs = 500
else:
raise Exception("Invalid modelName!")
elif modelName in ["vgg19"]:
# Load pre-trained VGG19 model + higher level layers
print("Loading VGG19 pre-trained model...")
model = tf.keras.applications.VGG19(
weights='imagenet', include_top=False, input_shape=shape_img)
model.summary()
shape_img_resize = tuple([int(x) for x in model.input.shape[1:]])
input_shape_model = tuple([int(x) for x in model.input.shape[1:]])
output_shape_model = tuple([int(x) for x in model.output.shape[1:]])
n_epochs = None
else:
raise Exception("Invalid modelName!")
# Print some model info
print("input_shape_model = {}".format(input_shape_model))
print("output_shape_model = {}".format(output_shape_model))
# Apply transformations to all images
class ImageTransformer(object):
def __init__(self, shape_resize):
self.shape_resize = shape_resize
def __call__(self, img):
img_transformed = resize_img(img, self.shape_resize)
img_transformed = normalize_img(img_transformed)
return img_transformed
transformer = ImageTransformer(shape_img_resize)
print("Applying image transformer to training images...")
imgs_train_transformed = apply_transformer(
imgs_train, transformer, parallel=parallel)
print("Applying image transformer to test images...")
imgs_test_transformed = apply_transformer(
imgs_test, transformer, parallel=parallel)
# Convert images to numpy array
X_train = np.array(imgs_train_transformed).reshape((-1, ) + input_shape_model)
X_test = np.array(imgs_test_transformed).reshape((-1, ) + input_shape_model)
import numpy as np
from skimage.draw import line
def dist(x1,x2):
"""
euclidian distance / norm
:param x1: a numeric 2-tuple
:param x2: another numeric 2-tuple
:return: magnitude of the vector between `x1` and `x2`
"""
# ((x_f - x_i)^2 + (y_f - y_i)^2)^{1/2}
return np.sqrt(np.sum(np.square(np.array(x1)-np.array(x2)), axis=0))
def angle(x1,x2):
"""
angle (in radians!) between two vectors.
:param x1: the destination of a vector rooted at (0,0)
:param x2: the destination of another vector rooted at (0,0)
:return: an angle in [-pi and pi] between two vectors
"""
# \tan^{-1}((y_f - y_i) / (x_f - x_i))
return (np.arctan2(x2[1],x2[0]) - np.arctan2(x1[1],x1[0]))
def tri_area(tri):
"""
finds the area of a triangle
:param tri: a 3-tuple of integer 2-tuples, describing a triangle.
:return: a positive real number, the area of `tri`
"""
tri = np.array(tri)
a = dist(tri[1],tri[0])
b = dist(tri[2],tri[0])
a_vec = tri[1]-tri[0]
b_vec = tri[2]-tri[0]
theta = angle(a_vec, b_vec)
return abs(0.5 * a * b * np.sin(theta))
def get_eq(segment, axis, verbose=False):
"""
given a segment and an oriented axis, find the line containing that segment.
:param segment: two points defining a line segment
:param axis: determines the orientation of the plane,
i.e., which axis is x and which axis is y.
:return: a function f(x) = a*y + b, defining the line containing `segment`
"""
# parse `segment` according to the oriented axis
if axis == 0:
(x1,y1),(x2,y2) = segment
elif axis == 1:
(y1,x1),(y2,x2) = segment
else:
raise NotImplementedError("only the two-dimensional case is supported.")
# differences along each axis
dy = y2-y1
dx = x2-x1
if dx == 0:
# see `constraint()` for an explanation of this ugliness
return lambda w: np.inf
# calculate the ascent and origin of the line
slope = dy/dx
shift = (y1+y2)/2 - slope * (x1+x2)/2
if verbose:
# give the equation
if axis == 0:
y = 'y'
x = 'x'
else:
y = 'x'
x = 'y'
print('{} = {}{} + {}'.format(y,slope,x,shift))
return lambda w: slope*w + shift
def constraint(f, orientation, parity):
"""
uses a line `f` to construct a partition of the euclidian plane
:param f: a linear equation |R -> |R, f(x) = y
:param orientation: determines the orientation of the plane,
i.e., which axis is x and which axis is y.
:param parity: determines the direction traversed along the oriented axis
:return: a function that labels points in the plane, per the partition.
"""
if f(0) == np.inf:
# orthogonal to oriented axis: this constraint can safely be ignored
return lambda x: True
else:
# not orthogonal
if parity == 1:
# assigns label "True" to the half-plane /below/ f
return lambda x: x[orientation-1] <= f(x[orientation])
elif parity == -1:
# assigns label "True" to the half-plane /above/ f
return lambda x: x[orientation-1] >= f(x[orientation])
else:
raise ValueError("parity must be +- 1")
def tri_bresenham(segment, axis, parity, constraint, verbose=False):
"""
modifies bresenham's line algorithm to find the points within a triangle.
todo - accept arbitrarily many columns ~ dimensions
:param segment: points describing the oriented side of the triangle
:param axis: the axis along which bresenham's algorithm traverses
:param parity: the direction in which bresenham's algorithm traverses
:param constraint: heuristic for point-triangle intersection
:param verbose: print debugging info
:return: a list of all pixels for which `constraint` evaluates to True
"""
a,b = segment
ab = X,Y = line(a[0],a[1],b[0],b[1])
assert len(X) == len(Y)
n = len(X)
points = []
prev_row = -1
for i in range(n):
# iterate over each unique column index
curr_row = ab[axis][i]
if curr_row == prev_row:
continue
# mutable position vector
x = [X[i],Y[i]]
# temp container for traversed points
col = []
# within the triangle
while constraint(x):
# collect points
col.append((x[0],x[1]))
if verbose:
print(x)
# and march forward as per axis & parity (dimension and direction)
x[axis-1] += parity
points.extend(col)
if verbose: print(col)
prev_row = curr_row
if verbose: print(points)
return points
def fill_tri(tri,verbose=False):
"""
wrapper for the tri_bresenham routine; generates all points within a
triangle. requires time and space linear to the area of the triangle.
:param tri: a 3-tuple of integer 2-tuples, describing a triangle.
:param verbose: print debugging info
:return: a list containing all points that fall within the triangle
"""
a,b,c = tri
# locate the longest side `ax` of the triangle
sides = [(a,b),(b,c),(c,a)]
ax_idx = np.argmax([dist(x[0],x[1]) for x in sides])
ax = sides[ax_idx]
ax_ang = angle(ax[0],ax[1])
# find the vertex opposing `ax`
c1,c2 = sides[ax_idx-1],sides[ax_idx-2]
assert c1[0] == c2[1]
t = c1[0]
# orient the triangle to an axis (per the closest to `ax`)
if np.pi/4 <= ax_ang < 3*np.pi/4 or -3*np.pi/4 <= ax_ang < -np.pi/4:
orientation = 1
else:
orientation = 0
# find the line containing `ax`
f = get_eq(ax,orientation,verbose=verbose)
# determine the parity of the triangle
if f(t[orientation]) < t[orientation-1]:
parity = 1
else:
parity = -1
if verbose:
print('axial:\t\t',ax)
print('angle:\t\t',ax_ang)
print('third:\t\t',t)
print('orientation:\t',orientation)
print('parity:\t\t',parity)
# lines containing the other two sides of the triangle
g = get_eq(c1,orientation,verbose=verbose)
h = get_eq(c2,orientation,verbose=verbose)
# build constraints with regard to the oriented side
under_g = constraint(g, orientation, parity)
under_h = constraint(h, orientation, parity)
# the points above f and below g,h - which makes up the whole triangle!
points = tri_bresenham(ax, orientation, parity,
constraint=lambda x: under_g(x) and under_h(x),
verbose=verbose)
return points
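# Hedged usage sketch (kept commented out because fill_tri is patched below;
# the vertices are illustrative only):
#   demo_tri = [(0, 0), (8, 1), (3, 6)]
#   demo_pts = fill_tri(demo_tri)            # integer pixels inside the triangle
#   print(len(demo_pts), tri_area(demo_tri)) # pixel count ~ analytic area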
def lazy_patch(tri,verbose=False):
"""
a personally lazy, computationally expensive patch to `fill_tri`. instead of
using math or logic to determine the proper alignment, orientation, and
parity, just iterate all twelve combinations thereof: {1,2,3}x{0,1}x{-1,1}.
:param tri: a triangle, described by its three vertices points.
:param verbose: print debugging info
:return: a list containing all points that fall within the triangle
"""
a,b,c = tri
# locate the longest side `ax` of the triangle
sides = [(a,b),(b,c),(c,a)]
P = []
for ax_idx in [0,1,2]:
ax = sides[ax_idx]
ax_ang = angle(ax[0],ax[1])
# find the vertex opposing `ax`
c1,c2 = sides[ax_idx-1],sides[ax_idx-2]
assert c1[0] == c2[1]
t = c1[0]
# orient the triangle to an axis (per the closest to `ax`)
if np.pi/4 <= ax_ang < 3*np.pi/4 or -3*np.pi/4 <= ax_ang < -np.pi/4:
orientation = 1
else:
orientation = 0
for orientation in [0,1]:
# find the line containing `ax`
f = get_eq(ax,orientation,verbose=verbose)
# determine the parity of the triangle
for parity in [-1,1]:
if verbose:
print('axial:\t\t',ax)
print('angle:\t\t',ax_ang)
print('third:\t\t',t)
print('orientation:\t',orientation)
print('parity:\t\t',parity)
# lines containing the other two sides of the triangle
g = get_eq(c1,orientation,verbose=verbose)
h = get_eq(c2,orientation,verbose=verbose)
# build constraints with regard to the oriented side
under_g = constraint(g, orientation, parity)
under_h = constraint(h, orientation, parity)
# the points above f and below g,h - which makes up the whole triangle!
points = tri_bresenham(ax, orientation, parity,
constraint=lambda x: under_g(x) and under_h(x),
verbose=verbose)
if len(points) > 0:
P.append(points)
return P
def fill_tri_2(tri,verbose=False):
"""
first lazy patch - use whatever conditions generated the most points
:param tri: a triangle, described by its three vertices points.
:param verbose: print debugging info
:return: a list containing all points that fall within the triangle
"""
P = lazy_patch(tri, verbose)
p_idx = np.argmax([len(points) for points in P])
return P[p_idx]
def fill_tri_3(tri,verbose=False):
"""
second lazy patch - use whatever conditions generated the number of points
closest to the actual area of the triangle.
:param tri: a triangle, described by its three vertices points.
:param verbose: print debugging info
:return: a list containing all points that fall within the triangle
"""
P = lazy_patch(tri, verbose)
area = tri_area(tri)
p_idx = np.argmin([abs(len(points)-area) for points in P])
return P[p_idx]
def main():
import sys
errstr = "your input didn't make sense!\n"
errstr += "\ntry something like this:\n"
errstr += "\tpython try_raster.py u,v w,x y,z\n"
errstr += "where u,...,z are whole numbers."
if len(sys.argv) != 4:
raise ValueError(errstr)
Vstr = sys.argv[1:]
V = [(int(vs.split(',')[0]), int(vs.split(',')[1])) for vs in Vstr]
a,b,c = V
if not all([len(x) == 2 for x in V]):
raise ValueError(errstr)
X = [v[0] for v in V]
Y = [v[1] for v in V]
width = max(X) - min(X) + 2
height = max(Y) - min(Y) + 2
points = fill_tri_3([a,b,c], verbose=True)
print(len(points))
    im = np.zeros((width, height))
from dolfin import *
import mshr
import sys
import numpy as np
from capture_cpp_cout import capture_cpp_cout
import matplotlib.pyplot as plt
from fem import *
# Hexagon Coordinates:
# (-R,0), (-R/2, Sqrt(3)/2 R), ( R/2, Sqrt(3)/2 R),
# ( R,0), ( R/2,-Sqrt(3)/2 R), (-R/2,-Sqrt(3)/2 R)
def hexagon(x, R):
return (x[1]>-np.sqrt(3)/2*R) and \
(x[1]< np.sqrt(3)/2*R) and \
(( np.sqrt(3)*x[0]+x[1] < R*np.sqrt(3))) and \
(( np.sqrt(3)*x[0]+x[1] >-R*np.sqrt(3))) and \
            ((-np.sqrt(3)*x[0]+x[1] < R*np.sqrt(3))) and \
            ((-np.sqrt(3)*x[0]+x[1] >-R*np.sqrt(3)))
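# Hedged usage sketch: `hexagon` is a pointwise membership test for a hexagon
# of circumradius R, e.g. (values illustrative):
#   inside = hexagon([0.1, -0.2], 1.0)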
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 20:36:20 2020
@author: duttar
Description : Reads the daily temperature data from DS633 catalog
Calculates the Accumulated Degree Days of Thawing from the temperature data
"""
import numpy as np
from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import scipy.io as sio
#
# Opening a file
file = open("allind.txt","r")
Counter = 0
# Reading from file
Content = file.read()
CoList = Content.split("\n")
for i in CoList:
if i:
Counter += 1
file.close()
# read the monthly file
for i in range(Counter):
f = open("allind.txt")
lines_variable = f.readlines()
line_var = lines_variable[i]
nc_file = line_var[0:-1]
f.close()
fh = Dataset(nc_file, mode='r')
VAR_2T = fh.variables['VAR_2T'][:]
VAR_2T = np.array(VAR_2T)
VAR_2T_domain = VAR_2T[:,4:12,57:88]
longitude1 = fh.variables['longitude'][:]
longitude1 = np.array(longitude1)
lons = longitude1[57:88] # selected longitude
latitude1 = fh.variables['latitude'][:]
latitude1 = np.array(latitude1)
lats = latitude1[4:12] # selected latitude
fh.close()
# calculate the daily average temperature
    numdays = int(VAR_2T.shape[0] / 24)
var_2T_avgday = np.zeros((numdays, 8, 31))
for latind in range(lats.shape[0]):
for lonind in range(lons.shape[0]):
varval = VAR_2T_domain[:,latind,lonind]
avg_2T = np.zeros((numdays,1))
for j in range(numdays):
ind = j*24 + np.arange(0,24, dtype=int)
avgday = (np.max(varval[ind]) + np.min(varval[ind]))/2
avg_2T[j] = avgday
var_2T_avgday[:, latind, lonind] = avg_2T.flatten(order='F')
var_name = line_var[48:69] + '.mat'
# save the file in matlab format
sio.savemat(var_name, {'var_2T_avgday':var_2T_avgday, 'latitude':lats, 'longitude':lons})
print_var = 'saved the matlab file: ' + var_name + '\n'
print(print_var)
##############################################################################
# plot the addt for the year 2016
lon, lat = np.meshgrid(lons, lats)
locnum = lon.shape[0]*lon.shape[1]
locall = np.concatenate((lon.reshape(locnum,1, order = 'F'), lat.reshape(locnum,1, order = 'F')), axis=1)
temp2016 = np.zeros((locnum, 366))
prevdays = 0
f = open("2016.txt")
lines_variable = f.readlines()
for i in range(12):
line_var = lines_variable[i]
matfile = line_var[0:-1]
mat_c1 = sio.loadmat(matfile)
var_2Tmat = mat_c1['var_2T_avgday']
lonmat = mat_c1['longitude']
latmat = mat_c1['latitude']
numdays = var_2Tmat.shape[0]
ind_days = prevdays + np.arange(numdays)
for j in range(locnum):
lon_pr = locall[j,0]
lat_pr = locall[j,1]
indlon = np.where(lonmat == lon_pr)
indlat = np.where(latmat == lat_pr)
indlon1 = indlon[1]
indlat1 = indlat[1]
temp2016[j, ind_days] = var_2Tmat[:, indlat1, indlon1].flatten(order='F')
prevdays = prevdays + numdays
var_name = 'temp2016.mat'
sio.savemat(var_name, {'temp2016':temp2016})
f.close()
# calculate ADDT (accumulated degree days of thawing)
details_addt = np.zeros((locnum, 3)) # contains detail of addt
# first column is number of days over 273.15
# second column - first day of above 273.15
# third column - last day of above 273.15
addt2016 = np.zeros((locnum, 365))
for i in range(locnum):
temp_i = temp2016[i, :]
ind_mtzero = np.where(temp_i > 273.15)
ind_mtzero = np.array(ind_mtzero)
first_day = ind_mtzero[0][0] + 1
last_day = ind_mtzero[0][-1] + 1
num_zeroday = last_day - first_day
addt_prev = 0
for days in range(num_zeroday):
if temp2016[i, first_day -1 + days] > 273.15:
addt_day = temp2016[i, first_day-1+days] - 273.15
else:
addt_day = 0
addt2016[i, days] = addt_prev + addt_day
addt_prev = addt_prev + addt_day
details_addt[i, 0] = num_zeroday
details_addt[i, 1] = first_day
details_addt[i, 2] = last_day
print(i)
var_name = 'addt2016.mat'
sio.savemat(var_name, {'addt2016':addt2016, 'details_addt':details_addt, \
'lonlat':locall})
##############################################################################
# plot the addt for the year 2017
lon, lat = np.meshgrid(lons, lats)
locnum = lon.shape[0]*lon.shape[1]
locall = np.concatenate((lon.reshape(locnum,1, order = 'F'), lat.reshape(locnum,1, order = 'F')), axis=1)
temp2017 = np.zeros((locnum, 365))
prevdays = 0
f = open("2017.txt")
lines_variable = f.readlines()
for i in range(12):
line_var = lines_variable[i]
matfile = line_var[0:-1]
print(matfile)
mat_c1 = sio.loadmat(matfile)
var_2Tmat = mat_c1['var_2T_avgday']
lonmat = mat_c1['longitude']
latmat = mat_c1['latitude']
numdays = var_2Tmat.shape[0]
ind_days = prevdays + np.arange(numdays)
for j in range(locnum):
lon_pr = locall[j,0]
lat_pr = locall[j,1]
indlon = np.where(lonmat == lon_pr)
indlat = np.where(latmat == lat_pr)
indlon1 = indlon[1]
indlat1 = indlat[1]
temp2017[j, ind_days] = var_2Tmat[:, indlat1, indlon1].flatten(order='F')
prevdays = prevdays + numdays
var_name = 'temp2017.mat'
sio.savemat(var_name, {'temp2017':temp2017})
f.close()
# calculate ADDT (accumulated degree days of thawing)
details_addt = np.zeros((locnum, 3)) # contains detail of addt
# first column is number of days over 273.15
# second column - first day of above 273.15
# third column - last day of above 273.15
addt2017 = np.zeros((locnum, 365))
for i in range(locnum):
temp_i = temp2017[i, :]
ind_mtzero = np.where(temp_i > 273.15)
ind_mtzero = np.array(ind_mtzero)
first_day = ind_mtzero[0][0] + 1
last_day = ind_mtzero[0][-1] + 1
num_zeroday = last_day - first_day
addt_prev = 0
for days in range(num_zeroday):
if temp2017[i, first_day -1 + days] > 273.15:
addt_day = temp2017[i, first_day-1+days] - 273.15
else:
addt_day = 0
addt2017[i, days] = addt_prev + addt_day
addt_prev = addt_prev + addt_day
details_addt[i, 0] = num_zeroday
details_addt[i, 1] = first_day
details_addt[i, 2] = last_day
print(i)
var_name = 'addt2017.mat'
sio.savemat(var_name, {'addt2017':addt2017, 'details_addt':details_addt, \
'lonlat':locall})
##############################################################################
# plot the addt for the year 2018
lon, lat = np.meshgrid(lons, lats)
locnum = lon.shape[0]*lon.shape[1]
locall = np.concatenate((lon.reshape(locnum,1, order = 'F'), lat.reshape(locnum,1, order = 'F')), axis=1)
temp2018 = np.zeros((locnum, 365))
prevdays = 0
f = open("2018.txt")
lines_variable = f.readlines()
for i in range(12):
line_var = lines_variable[i]
matfile = line_var[0:-1]
print(matfile)
mat_c1 = sio.loadmat(matfile)
var_2Tmat = mat_c1['var_2T_avgday']
lonmat = mat_c1['longitude']
latmat = mat_c1['latitude']
numdays = var_2Tmat.shape[0]
ind_days = prevdays + np.arange(numdays)
for j in range(locnum):
lon_pr = locall[j,0]
lat_pr = locall[j,1]
indlon = np.where(lonmat == lon_pr)
indlat = np.where(latmat == lat_pr)
indlon1 = indlon[1]
indlat1 = indlat[1]
temp2018[j, ind_days] = var_2Tmat[:, indlat1, indlon1].flatten(order='F')
prevdays = prevdays + numdays
var_name = 'temp2018.mat'
sio.savemat(var_name, {'temp2018':temp2018})
f.close()
# calculate ADDT (accumulated degree days of thawing)
details_addt = np.zeros((locnum, 3)) # contains detail of addt
# first column is number of days over 273.15
# second column - first day of above 273.15
# third column - last day of above 273.15
addt2018 = np.zeros((locnum, 365))
from __future__ import print_function
from matplotlib.collections import PatchCollection,LineCollection
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import matplotlib.pyplot as plt
import numpy as np
from .. import utils
def plot_linestring(ls,**kwargs):
ax=kwargs.pop('ax',plt.gca())
c = np.array(ls.coords)
return ax.plot( c[:,0],c[:,1],**kwargs)[0]
def plot_multilinestring(mls,**kwargs):
ax=kwargs.pop('ax',plt.gca())
if mls.type == 'MultiLineString':
segs = [np.array(ls.coords) for ls in mls.geoms]
coll = LineCollection(segs,**kwargs)
ax.add_collection(coll)
return coll
else:
return plot_linestring(mls,**kwargs)
########
# New, non-hacked way to plot polygons with holes
# From: http://sgillies.net/blog/1013/painting-punctured-polygons-with-matplotlib/
def ring_coding(ob):
# The codes will be all "LINETO" commands, except for "MOVETO"s at the
# beginning of each subpath
n = len(ob.coords)
codes = np.ones(n, dtype=Path.code_type) * Path.LINETO
codes[0] = Path.MOVETO
# unsure of difference between CLOSEPOLY and leaving as is.
# codes[-1] = Path.CLOSEPOLY # doesn't seem to make a difference
return codes
def pathify(polygon):
# Convert coordinates to path vertices. Objects produced by Shapely's
# analytic methods have the proper coordinate order, no need to sort.
# 20170707: matplotlib pickier about ordering of internal rings, may have
# reverse interiors.
# 20170719: shapely doesn't guarantee one order or the other
def ensure_orientation(a,ccw=True):
"""
take an array-like [N,2] set of points defining a polygon,
return an array which is ordered ccw (or cw is ccw=False)
"""
        a = np.asarray(a)
        # signed area via the shoelace formula; positive area means CCW order
        area = 0.5 * np.sum(a[:, 0] * np.roll(a[:, 1], -1)
                            - np.roll(a[:, 0], -1) * a[:, 1])
        if (area < 0) == ccw:
            a = a[::-1]
        return a
import numpy as np
from scipy.optimize import curve_fit
from sklearn.decomposition import PCA
from sklearn.linear_model import RidgeClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.model_selection import KFold
class NestedXval():
'''A generator for nested cross-validation that ensures that there is the
same number of trials for each class in training.
It is necessary to have the same number of trials in each category to
vectorize the training of the decoder so that the training of all 6
decoders (in one vs one scheme) is done simultaneously.
'''
def __init__(self, n_outer_splits=None):
'''Nested crossvalidation to get the same number of trials in each class for training'''
self.nouter = n_outer_splits
self.ninner = 2
self.outerxval = KFold(n_splits=n_outer_splits)
def split(self, targets):
'''Returns a generator that splits data in test, train and subspace
with the same number of trials in each category
'''
labels, counts = np.unique(targets, return_counts=True)
nclasses = len(labels)
if not np.all(counts[0] == counts) and max(counts) - min(counts) > 1:
raise ValueError("The number of trials in each class in not consistant")
interleaved_outer = np.concatenate(list(zip(*[np.where(targets == label)[0] for label in labels])))
leftovers = []
for iclass in np.where(np.min(counts) < counts)[0]:
leftovers.append(np.where(targets == labels[iclass])[0][-1])
interleaved_outer = np.concatenate((interleaved_outer, np.array(leftovers))).astype(int)
targets_ = targets[interleaved_outer]
outersplit = self.outerxval.split(targets)
for ioutsplit in range(self.nouter):
restinds, testinds = next(outersplit)
ntrain_per_class = np.ceil(len(restinds) / 2 / nclasses).astype(int)
inner_inds_by_class = [np.where(targets_[restinds] == label)[0] for label in labels]
traininds = np.concatenate(list(zip(*[restinds[classinds[:ntrain_per_class]] for classinds in inner_inds_by_class])))
subinds = np.concatenate([restinds[classinds[ntrain_per_class:]] for classinds in inner_inds_by_class])
testinds = interleaved_outer[testinds]
traininds = interleaved_outer[traininds]
subinds = interleaved_outer[subinds]
yield np.sort(testinds), np.sort(traininds), np.sort(subinds)
traininds = np.concatenate(list(zip(*[restinds[classinds[:-ntrain_per_class:-1]] for classinds in inner_inds_by_class])))
subinds = np.concatenate([restinds[classinds[-ntrain_per_class::-1]] for classinds in inner_inds_by_class])
testinds = interleaved_outer[testinds]
traininds = interleaved_outer[traininds]
subinds = interleaved_outer[subinds]
yield np.sort(testinds), np.sort(traininds), np.sort(subinds)
def sub_split(targets, trainind):
'''Cross-validation generator for the decoder and subspace trials
Function to split training trials in training and subspace trials, ensuring
that there is the same number of trials in each class for training.
Parameters
----------
targets : np.array - The targets (or y values)
trainind : np.array - The indices of the training trials
Returns
-------
Generator for each fold. Yields a tuple of np.array, one array for the
training trials and one array for the subspace
'''
targets = targets[trainind]
labels = np.unique(targets)
nclasses = len(labels)
ntrain_per_class = np.ceil(len(targets) / 2 / nclasses).astype(int)
inner_inds_by_class = [np.where(targets == label)[0] for label in labels]
ridgeind = np.concatenate(list(zip(*[classinds[:ntrain_per_class] for classinds in inner_inds_by_class])))
subind = np.concatenate([classinds[ntrain_per_class:] for classinds in inner_inds_by_class])
    yield np.sort(trainind[ridgeind]), np.sort(trainind[subind])
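# Hedged usage sketch (illustrative, balanced targets assumed):
#   targets = np.repeat([0, 1, 2], 8)
#   for testinds, traininds, subinds in NestedXval(n_outer_splits=4).split(targets):
#       ...  # fit the decoder on traininds, estimate the subspace on subinds,
#            # and evaluate on testinds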
import torch
import numpy as np
from typing import List, Optional, Dict, Tuple
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
from torch.nn import CrossEntropyLoss
from pyitlib import discrete_random_variable as drv
from sklearn.metrics.cluster import silhouette_score, calinski_harabasz_score
from categoryeval.ba import BAScorer
from categoryeval.dp import DPScorer
from categoryeval.cs import CSScorer
from preppy import Prep
from entropicstarttheory import configs
from entropicstarttheory.s_dbw import S_Dbw
from entropicstarttheory.rnn import RNN
from entropicstarttheory.representation import softmax
from entropicstarttheory.representation import make_inp_representations_without_context
from entropicstarttheory.representation import make_out_representations
def get_context2f(prep: Prep,
probes: List[str],
direction: str,
) -> Dict[str, int]:
"""
find the set of words that occur to the left/right of probes, and return them with their counts
if a set is returned, this under-estimates the contribution of very frequent contexts,
and over-estimates the contribution of infrequent contexts.
the best strategy is to return a dict with counts,
and to use this to compute representations,
and then to repeat each unique representation by the number of times the word was found.
for sem-2021 probes and AO-CHILDES,
there are approx 10K unique left contexts, while the full list has 200K contexts
"""
# offset should determine where to look for a probe.
# specifically, when context == 'l', we check for probes to the right, that is at location n + 1
offset = +1 if direction == 'l' else -1
res = dict()
for n, token in enumerate(prep.tokens[:-1]): # do not remove elements from left side of tokens
if prep.tokens[n+offset] in probes:
res.setdefault(token, 0)
res[token] += 1
return res
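# Illustration of the direction convention above (toy data, not from a corpus):
# with prep.tokens = ['the', 'dog', 'ran'] and probes = ['dog'], direction='l'
# uses offset +1 and counts 'the', because tokens[0 + 1] == 'dog'.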
def calc_perplexity(model: RNN,
criterion: CrossEntropyLoss,
prep: Prep,
is_test: bool,
):
print(f'Calculating {"test" if is_test else "train"} perplexity...', flush=True)
pp_sum = 0
num_batches = 0
with torch.no_grad():
for windows in prep.generate_batches(is_test=is_test):
# to tensor
x, y = np.split(windows, [prep.context_size], axis=1)
inputs = torch.LongTensor(x).cuda()
targets = torch.LongTensor(np.squeeze(y)).cuda()
# calc pp (using torch only, on GPU)
logits = model(inputs)['logits'] # initial hidden state defaults to zero if not provided
loss_batch = criterion(logits, targets)
pp_batch = torch.exp(loss_batch) # need base e
pp_sum += pp_batch.cpu().numpy()
num_batches += 1
pp = pp_sum / num_batches
return pp
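# Note: the function above averages per-batch perplexities; this is close to,
# but not identical to, exp(mean cross-entropy over all batches), since exp is
# convex (mean-of-exp >= exp-of-mean).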
def eval_ba_performance(representations: np.array,
probe2cat: Dict[str, str]
):
"""
balanced accuracy.
Note:
only works for probe representations (because category labels are required)
"""
ba_scorer = BAScorer(probe2cat)
probe_sims = cosine_similarity(representations)
res = ba_scorer.calc_score(probe_sims, ba_scorer.gold_sims, 'ba', return_threshold=False)
return res
def eval_si_performance(representations: np.array,
probe2cat: Dict[str, str]
):
"""
silhouette score.
how well do probe representations cluster with representations of probes in the same class?
"""
categories = sorted(set(probe2cat.values()))
category_labels = [categories.index(probe2cat[p]) for p in probe2cat]
# compute silhouette score
res = silhouette_score(representations, category_labels, metric='cosine')
return res
def eval_sd_performance(representations: np.array,
probe2cat: Dict[str, str]
):
"""
S-Dbw score.
how well do probe representations cluster with representations of probes in the same class?
using code from https://github.com/alashkov83/S_Dbw
"""
categories = sorted(set(probe2cat.values()))
category_labels = [categories.index(probe2cat[p]) for p in probe2cat]
res = S_Dbw(representations,
category_labels,
centers_id=None,
method='Tong',
alg_noise='bind',
centr='mean',
nearest_centr=True,
metric='cosine')
return res
def eval_ma_performance(representations: np.array,
):
"""
magnitude of representations.
"""
ma = np.linalg.norm(representations, axis=1).mean() # computes magnitude for each vector, then mean
return ma
def eval_pr1_performance(representations: np.array,
types_eval: List[str],
prep: Prep,
model: RNN,
):
"""
divergence between actual and theoretical prototype.
the "actual prototype" is computed by averaging the representations of probes on their first singular dimension.
next, the output of the model is computed, and compared to the "theoretical prototype",
a probability distribution constructed by averaging over all probes' next-word probability distributions.
"""
dp_scorer = DPScorer(types_eval, prep.tokens, prep.types)
# compute average projection on first singular dimension
    u, s, vt = np.linalg.svd(representations, compute_uv=True)
"""
Generic methods for converting data between different spatial coordinate systems.
Uses pyproj library.
"""
import firedrake as fd
import pyproj
import numpy
from abc import ABC, abstractmethod
LL_WGS84 = pyproj.Proj(proj='latlong', datum='WGS84', errcheck=True)
class CoordinateSystem(ABC):
"""
Base class for horizontal coordinate systems
Provides methods for coordinate transformations etc.
"""
@abstractmethod
def to_lonlat(self, x, y):
"""Convert coordinates to latitude and longitude"""
pass
@abstractmethod
def get_vector_rotator(self, x, y):
"""
Returns a vector rotator object.
The rotator converst vector-valued data to/from longitude, latitude
coordinates.
"""
pass
def proj_transform(x, y, trans=None, source=None, destination=None):
"""
Transform coordinates from source to target system.
:arg x,y: coordinates, float or numpy.array_like
:kwarg trans: pyproj Transformer object (optional)
:kwarg source: source coordinate system, Proj object
:kwarg destination: destination coordinate system, Proj object
"""
if trans is None:
assert source is not None and destination is not None, \
'Either trans or source and destination must be defined'
        trans = pyproj.Transformer.from_proj(source, destination)
x_is_array = isinstance(x, numpy.ndarray)
y_is_array = isinstance(y, numpy.ndarray)
numpy_inputs = x_is_array or y_is_array
if numpy_inputs:
assert x_is_array and y_is_array, 'both x and y must be numpy arrays'
assert x.shape == y.shape, 'x and y must have same shape'
# transform only non-nan entries as proj behavior can be erratic
a = numpy.full_like(x, numpy.nan)
b = numpy.full_like(y, numpy.nan)
good_ix = numpy.logical_and(numpy.isfinite(x), numpy.isfinite(y))
a[good_ix], b[good_ix] = trans.transform(x[good_ix], y[good_ix])
else:
a, b = trans.transform(x, y)
return a, b
class UTMCoordinateSystem(CoordinateSystem):
"""
Represents Universal Transverse Mercator coordinate systems
"""
def __init__(self, utm_zone):
self.proj_obj = pyproj.Proj(proj='utm', zone=utm_zone, datum='WGS84',
units='m', errcheck=True)
self.transformer_lonlat = pyproj.Transformer.from_crs(
self.proj_obj.srs, LL_WGS84.srs)
self.transformer_xy = pyproj.Transformer.from_crs(
LL_WGS84.srs, self.proj_obj.srs)
def to_lonlat(self, x, y, positive_lon=False):
"""
Convert (x, y) coordinates to (latitude, longitude)
:arg x: x coordinate
:arg y: y coordinate
:type x: float or numpy.array_like
:type y: float or numpy.array_like
:kwarg positive_lon: should positive longitude be enforced?
:return: longitude, latitude coordinates
"""
lon, lat = proj_transform(x, y, trans=self.transformer_lonlat)
if positive_lon:
lon = numpy.mod(lon, 360.0)
return lon, lat
def to_xy(self, lon, lat):
"""
        Convert (longitude, latitude) coordinates to (x, y)
        :arg lon: longitude coordinate
        :arg lat: latitude coordinate
        :type lon: float or numpy.array_like
        :type lat: float or numpy.array_like
:return: x, y coordinates
"""
x, y = proj_transform(lon, lat, trans=self.transformer_xy)
return x, y
def get_mesh_lonlat_function(self, mesh2d):
"""
Construct a :class:`Function` holding the mesh coordinates in
longitude-latitude coordinates.
:arg mesh2d: the 2D mesh
"""
dim = mesh2d.topological_dimension()
if dim != 2:
raise ValueError(f'Expected a mesh of dimension 2, not {dim}')
if mesh2d.geometric_dimension() != 2:
raise ValueError('Mesh must reside in 2-dimensional space')
x = mesh2d.coordinates.dat.data_ro[:, 0]
y = mesh2d.coordinates.dat.data_ro[:, 1]
lon, lat = self.transformer_lonlat.transform(x, y)
lonlat = fd.Function(mesh2d.coordinates.function_space())
lonlat.dat.data[:, 0] = lon
lonlat.dat.data[:, 1] = lat
return lonlat
def get_vector_rotator(self, lon, lat):
"""
Returns a vector rotator object.
The rotator converts vector-valued data from longitude, latitude
coordinates to mesh coordinate system.
"""
return VectorCoordSysRotation(LL_WGS84, self.proj_obj, lon, lat)
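# Usage sketch for UTMCoordinateSystem (coordinates illustrative, not from the
# original module):
#
#     cs = UTMCoordinateSystem(utm_zone=30)
#     lon, lat = cs.to_lonlat(numpy.array([500000.0]), numpy.array([4649776.0]))
#     x, y = cs.to_xy(lon, lat)   # round-trips back to the UTM inputs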
def convert_coords(source_sys, target_sys, x, y):
"""
Converts coordinates from source_sys to target_sys
This function extends pyproj.transform method by handling NaNs correctly.
:arg source_sys: pyproj coordinate system where (x, y) are defined in
:arg target_sys: target pyproj coordinate system
:arg x: x coordinate
:arg y: y coordinate
:type x: float or numpy.array_like
:type y: float or numpy.array_like
"""
if isinstance(x, numpy.ndarray):
# proj may give wrong results if nans in the arrays
lon = numpy.full_like(x, numpy.nan)
lat = numpy.full_like(y, numpy.nan)
goodIx = numpy.logical_and(numpy.isfinite(x), numpy.isfinite(y))
lon[goodIx], lat[goodIx] = pyproj.transform(
source_sys, target_sys, x[goodIx], y[goodIx])
else:
lon, lat = pyproj.transform(source_sys, target_sys, x, y)
return lon, lat
def get_vector_rotation_matrix(source_sys, target_sys, x, y, delta=None):
"""
Estimate rotation matrix that converts vectors defined in source_sys to
target_sys.
Assume that we have a vector field defined in source_sys: vectors located at
(x, y) define the x and y components. We can then rotate the vectors to
represent x2 and y2 components of the target_sys by applying a local
rotation:
.. code-block:: python
        R, theta = get_vector_rotation_matrix(source_sys, target_sys, x, y)
v_xy = numpy.array([[v_x], [v_y]])
v_new = numpy.matmul(R, v_xy)
v_x2, v_y2 = v_new
"""
if delta is None:
delta = 1e-6 # ~1 m in LL_WGS84
x1, y1 = pyproj.transform(source_sys, target_sys, x, y)
x2, y2 = pyproj.transform(source_sys, target_sys, x, y + delta)
dxdl = (x2 - x1) / delta
dydl = (y2 - y1) / delta
theta = numpy.arctan2(-dxdl, dydl)
c = numpy.cos(theta)
    s = numpy.sin(theta)
''' Classes for sparse vectors, lists of related tensors and tensors describing
molecular graphs
'''
from __future__ import division, print_function, absolute_import
import numpy as np
import pickle as pkl
from .utils import mol_dims_to_shapes, mol_shapes_to_dims
class SparseTensor(object):
''' An immutable class for sparse tensors of any shape, type and sparse value.
# Arguments
nonsparse_indices (nested int-array): List of arrays with indices for
nonsparse elements at each dimension
nonsparse_values (int-array): array of corresponding values
default_value (of same dtype): The value that will be used for the non-
sparse indices
dtype (str/np.dtype): dtype, if `None`, dtype of nonsparse_values will be
used
        main_axis (int): Axis along which `len` and `__getitem__` will work.
        assume_sorted (bool): Only set to true if `nonsparse_indices[main_axis]`
is sorted! (To speed up initialisation)
# Attributes
shape (tuple): The sparse tensor has no real shape, `tensor.as_array()`
            takes a `shape` argument. However, the tensor does have a minimum
size for each dimension (determined by the nonsparse element at the
furthest position on that dimension)
dtype (str/dtype): Can be changed after the tensor is created
ndims (int): number of dimensions
# Notes
- This class is optimised for storage of data. The idea is that one of the
dimensions is declared to be the `main_axis`. (This would be the axis
along which the different datapoints are defined). All indexing occurs
along this axis.
- This class is not optimised for tensor operations, use `as_array` / numpy
for that
    - Is best initialised through the classmethod `from_array`
    - As the object is immutable, there is no support for
assignment or retrieval of individual entries. Use
`tensor.as_array()[indices]` instead.
- Currently, code is mainly optimised for retrieval of (relatively small)
batches.
# TODO, possible optimisations:
- discard main index but use lookup when storing
- build new lookup during __getitem__ and pass on init of new tensor to
avoid expensive rebuilding
'''
def __init__(self, nonsparse_indices, nonsparse_values, default_value=0,
max_shape=None, dtype=None, main_axis=0, assume_sorted=False):
# Assert valid index and convert negative indices to positive
ndims = len(nonsparse_indices)
main_axis = range(ndims)[main_axis]
self.main_axis = main_axis
self.default_value = default_value
        # Sort if necessary (note: `sorted()` returns a new list rather than
        # sorting in place, and zip() objects are not indexable in Python 3)
        if not assume_sorted and len(nonsparse_values):
            nonsparse_entries = list(zip(nonsparse_values, *nonsparse_indices))
            nonsparse_entries.sort(key=lambda x: x[main_axis+1])
            sorted_entries = list(zip(*nonsparse_entries))
            nonsparse_values = list(sorted_entries[0])
            nonsparse_indices = [list(inds) for inds in sorted_entries[1:]]
self.nonsparse_indices = [np.array([]) for _ in range(ndims)]
self.nonsparse_values = np.array([])
# Convert indices and values to numpy array and check dimensionality
for i, ind in enumerate(nonsparse_indices):
assert len(ind) == len(nonsparse_values), 'nonsparse_indices (size{0} @index {1}) should be of same length as nonsparse_values (size {2})'.format(len(ind), i, len(nonsparse_values))
nonsparse_indices[i] = np.array(ind, dtype='int')
self.nonsparse_indices = nonsparse_indices
self.nonsparse_values = np.array(nonsparse_values)
# Calculate and set the shape
if len(self.nonsparse_values):
self.true_shape = tuple([max(inds)+1 for inds in nonsparse_indices])
else:
self.true_shape = tuple([0]*ndims)
# Setting dtype will alter self.nonsparse_values
dtype = dtype or self.nonsparse_values.dtype
self.dtype = dtype
# Setting max_shape will check if shape matches with nonsparse entries
self.max_shape = max_shape or [None]*ndims
# Build lookup for quick indexing along the main_axis
# lookup defines first position of that element
self.lookup = np.searchsorted(nonsparse_indices[self.main_axis],
range(self.shape[self.main_axis]+1))
@property
def max_shape(self):
return self._max_shape
@max_shape.setter
def max_shape(self, max_shape):
        for true_s, max_s in zip(self.true_shape, max_shape):
assert (max_s is None) or (max_s>=true_s) , 'Cannot set max_shape {} smaller than true shape {}'.format(max_shape, self.true_shape)
self._max_shape = tuple(max_shape)
@property
def shape(self):
        return tuple([true_s if max_s is None else max_s
for true_s, max_s in zip(self.true_shape, self.max_shape)])
@property
def ndims(self):
return len(self.nonsparse_indices)
@property
def dtype(self):
return self._dtype
@dtype.setter
def dtype(self, dtype):
self._dtype = np.dtype(dtype)
self.nonsparse_values = self.nonsparse_values.astype(self.dtype)
def _nonsparse_entries(self, keys):
''' Returns indices and values required to create a new SparseTensor
given the provided keys (along main_axis)
# Arguments:
keys (int/list): The keys for which to return the nonspare entries
# Returns:
indices (np.array): the new nonsparse indices (concatenated)
values (np.array): the corresponding values (concatenated)
# Note:
mainly meant for internal use. Helper function of `self.__getitem__`
'''
if isinstance(keys, int):
while keys < 0:
keys += len(self)
start_stop = self.lookup[keys:keys+2]
if len(start_stop):
inds = range(*start_stop)
else:
inds = []
indices = [indices[inds] for indices in self.nonsparse_indices]
values = self.nonsparse_values[inds]
return indices, values
elif isinstance(keys, (list, tuple, np.ndarray)):
indices = [[] for _ in range(self.ndims)]
values = []
for g, key in enumerate(keys):
add_indices, add_values = self._nonsparse_entries(key)
values.append(add_values)
for i in range(self.ndims):
if i == self.main_axis:
# For the main_axis, rewrite the keys in chronological
# order (e.g. respect the ordering provided by keys)
indices[i].append(np.array([g]*len(add_values)))
else:
indices[i].append(add_indices[i])
indices = [np.concatenate(inds) for inds in indices]
values = np.concatenate(values)
return indices, values
else:
raise ValueError
    # Magic functions
def __len__(self):
return self.shape[self.main_axis]
def __getitem__(self, keys):
'''Gets the requested datapoints (along main axis) as SparseTensor
# Arguments:
keys (int, slice, list-like): Only one dimensional indexing is allowed
# Returns:
tensor (selfDataTensor): A new `SparseTensor` that corresponds
to the requested keys
'''
# Ensure keys is of usable type
if isinstance(keys, slice):
start, stop, step = keys.indices(len(self))
keys = range(start, stop, step)
if isinstance(keys, (tuple, list, np.ndarray)):
if len(keys) == 0:
raise IndexError('Cannot index `SparseTensor` with empty slice (`[]`)')
else:
assert isinstance(keys[0], int), 'Indexing is only allowed along the main axis ({})'.format(self.main_axis)
elif isinstance(keys, int):
pass
else:
raise IndexError('Only int, list, np.ndarray or slice (`:`) allowed for indexing `SparseTensor`')
# Copy properties of self to be passed to child object (make them mutatable)
indices, values = self._nonsparse_entries(keys)
max_shape = list(self.max_shape)
main_axis = int(self.main_axis)
# If getting a single element, drop singleton dimension
if isinstance(keys, int):
indices.pop(main_axis)
max_shape.pop(main_axis)
# Determining the new main axis is actually a trivial decision
main_axis = min(main_axis, len(max_shape)-1)
return self.__class__(dtype=self.dtype,
nonsparse_indices=indices, nonsparse_values=values,
main_axis=main_axis, default_value=self.default_value,
max_shape=max_shape)
def __repr__(self):
return "%s(dtype='%s', nonsparse_indices=%r, nonsparse_values=%r, main_axis=%r, default_value=%r, max_shape=%r)" % (
self.__class__.__name__, self.dtype,
[list(ind) for ind in self.nonsparse_indices],
list(self.nonsparse_values), self.main_axis, self.default_value,
self.max_shape)
def __str__(self):
return "%s(dtype='%s', shape=%s, default_value=%s)" % (
self.__class__.__name__, self.dtype, self.shape, self.default_value)
def __eq__(self, other):
''' Returns true if the sparse matrix can be expressed as other (by
forcing it into the same shape).
        If shapes cannot match, an assertion error is raised.
Note that `sparse.as_array(full_shape) == sparse` will have good performance,
because it uses this method, but `sparse == sparse.as_array(full_shape)`
will not work well, because numpy (1.11.2) will try to do the comparison
instead of calling this method.
'''
if isinstance(other, SparseTensor):
other = other.as_array()
shape = [max(s, o) for s,o in zip(self.shape, other.shape)]
else:
other = np.array(other)
shape = other.shape
return self.as_array(shape) == other
def __ne__(self, other):
return np.invert(self == other)
# Export and import functionality
@classmethod
def from_array(cls, arr, dtype=None, main_axis=0, default_value=0,
max_shape=None):
''' Turns a regular array or array-like into a SparseTensor
# Arguments:
arr (array-like): The array to convert into a SparseTensor
dtype (str/np.dtype): The datatype to use. If none is provided then
`np.array(arr).dtype` will be used
default_value (of same dtype): The nonsparse value to filter out
# Returns:
tensor (SparseTensor): s.t. `tensor.as_array(arr.shape) == arr`
'''
arr = np.array(arr)
nonsparse_indices = list(np.where(arr != default_value))
nonsparse_values = arr[nonsparse_indices]
# Assume_sorted if main_axis=0 because of np.where
assume_sorted = main_axis==0
return cls(dtype=arr.dtype, nonsparse_indices=nonsparse_indices,
                   nonsparse_values=nonsparse_values, main_axis=main_axis,
max_shape=max_shape, default_value=default_value,
assume_sorted=assume_sorted)
def as_array(self, shape=None):
'''Returns the SparseTensor as a nonsparse np.array
# Arguments:
shape (tuple/list): option to overwrite `self.max_shape` for
this call. Array returned will have this shape.
If None, `self.shape` will be used. (note that `self.shape` is
defined by `self.max_shape`, or `self.true_shape` where `self.max_shape`
is None). None values can also be used for individual dimensions
                within the shape tuple/list.
Note that `shape` should be at least as big as `self.true_shape`.
# Returns:
out (np.array): nonsparse array of self.dtype
'''
if not shape:
shape = [None] * self.ndims
# Overwrite None with self.shape
        shape = [true_s if s is None else s for s, true_s in zip(shape, self.shape)]
# Check if obtained shape matches with self.true_shape
assert np.all([s >=true_s for s, true_s in zip(shape, self.true_shape)]), 'shape ({}) should be at least {}'.format(shape, self.true_shape)
out = np.zeros(shape, dtype=self.dtype)
out.fill(self.default_value)
out[self.nonsparse_indices] = self.nonsparse_values
return out
def to_config(self, jsonify=False):
''' Returns a dict that can be used to recreate the file efficiently
# Arguments:
jsonify (bool): If True, dict will be jsonifiably (no `np.arrays`)
# Returns:
config (dict): that can be used in `SparseTensor.from_config`
'''
if jsonify:
nonsparse_indices = [i.tolist() for i in self.nonsparse_indices]
nonsparse_values = self.nonsparse_values.tolist()
else:
nonsparse_indices = self.nonsparse_indices
nonsparse_values = self.nonsparse_values
return dict(nonsparse_indices=nonsparse_indices, nonsparse_values=nonsparse_values,
default_value=self.default_value, dtype=str(self.dtype),
main_axis=self.main_axis, max_shape=self.max_shape,)
@classmethod
def from_config(cls, config):
''' Returns a SparseTensor based on the `config` dict
'''
return cls(nonsparse_indices=config['nonsparse_indices'],
nonsparse_values=config['nonsparse_values'],
default_value=config['default_value'], dtype=config['dtype'],
main_axis=config['main_axis'], max_shape=config['max_shape'],
assume_sorted=True)
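# Round-trip sanity sketch for SparseTensor (shapes illustrative; see also the
# unit tests at the bottom of this module):
#
#     arr = np.random.randint(3, size=(5, 4))
#     sp = SparseTensor.from_array(arr)
#     assert np.all(sp.as_array((5, 4)) == arr)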
class TensorList(object):
    ''' Helper class to cluster tensors together; acts as a single list by
    propagating calls and slicing through its members.
# Arguments:
tensors (list of iterables): Should have the same length
# Example:
```
>>> tensors = TensorList([np.zeros((5,4)), np.ones((5,2,2)), -np.ones((5,))])
>>> tensors.shape
[(5, 4), (5, 2, 2), (5,)]
>>> tensors[0:1]
[array([[ 0., 0., 0., 0.]]), array([[[ 1., 1.], [ 1., 1.]]]), array([-1.])]
```
'''
def __init__(self, tensors):
lengths = set([len(t) for t in tensors])
assert len(lengths) == 1, 'Length of all tensors should be the same'
self.length = list(lengths)[0]
self.tensors = tensors
def map(self, fn):
''' Apply function to all tensors and return result
'''
return [fn(t) for t in self.tensors]
def apply(self, fn):
''' Apply function to all tensors and replace with
'''
self.tensors = self.map(fn)
def __getitem__(self, key):
return [t[key] for t in self.tensors]
@property
def shape(self):
return [t.shape for t in self.tensors]
def __repr__(self):
return "%s(tensors=%r)" % (self.__class__.__name__, self.tensors)
def __len__(self):
return self.length
class GraphTensor(TensorList):
''' Datacontainer for (molecular) graph tensors.
This datacontainer mainly has advantages for indexing. The three tensors
describing the graph are grouped in a tensorlist so that `graph_tensor[x]`
will return atoms[x], bonds[x], edges[x]
Furthermore, this container allows for sparse dimensions. A sparse dimension
means that for each batch, that dimension is minimized to the maximum
length that occurs within that batch.
# Arguments:
        mol_tensors (tuple): tuple of np.array of nonsparse mol tensors
            (atoms, bonds, edges)
        sparse_max_atoms (bool): Whether or not max_atoms should be a sparse
            dimension.
        sparse_max_degree (bool): Whether or not max_degree should be a sparse
            dimension.
'''
def __init__(self, mol_tensors, sparse_max_atoms=True, sparse_max_degree=False):
self.sparse_max_atoms = sparse_max_atoms
self.sparse_max_degree = sparse_max_degree
(max_atoms, max_degree, num_atom_features, num_bond_features,
num_molecules) = mol_shapes_to_dims(mol_tensors)
# Set sparse dimension sizes to None
num_molecules = None
if sparse_max_atoms:
max_atoms = None
if sparse_max_degree:
max_degree = None
max_shapes = mol_dims_to_shapes(max_atoms, max_degree, num_atom_features,
num_bond_features)
# Convert into SparseTensors
atoms, bonds, edges = mol_tensors
atoms = SparseTensor.from_array(atoms, max_shape=max_shapes[0])
bonds = SparseTensor.from_array(bonds, max_shape=max_shapes[1])
edges = SparseTensor.from_array(edges, max_shape=max_shapes[2], default_value=-1)
# Initialise with result
super(GraphTensor, self).__init__([atoms, bonds, edges])
def __getitem__(self, keys):
# Make sure we don't lose the num_molecules dimension
if isinstance(keys, int):
keys = [keys]
# Get each sliced tensor as a new `SparseTensor` object
sliced_tensors = [t[keys] for t in self.tensors]
# Make sure that max_atoms and max_degree match across all tensors,
# (for isolated nodes (atoms), this is not always the case)
# Use the max value across all tensors
max_atoms_vals = [t.shape[1] for t in sliced_tensors]
max_degree_vals = [t.shape[2] for t in sliced_tensors[1:]]
max_atoms = max(max_atoms_vals)
max_degree = max(max_degree_vals)
# Return tensors with the matching shapes
shapes = mol_dims_to_shapes(max_atoms, max_degree, None, None, len(keys))
return [t.as_array(shape) for t, shape in zip(sliced_tensors, shapes)]
@property
def max_shape(self):
return [t.max_shape for t in self.tensors]
@property
def true_shape(self):
        return [t.true_shape for t in self.tensors]
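# Usage sketch (tensor names illustrative): indexing a GraphTensor returns the
# three dense arrays, resized to the smallest shape that fits the batch.
#
#     mols = GraphTensor((atoms, bonds, edges), sparse_max_atoms=True)
#     atoms_b, bonds_b, edges_b = mols[[0, 3, 7]]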
class EpochIterator(object):
''' Iterates over a dataset. (designed for keras fit_generator)
# Arguments:
        data (tuple): Tuple of data to iterate through, usually `(x_data, y_data)`,
though a tuple of any length can be passed. The iterables inside the
tuple should support list-indexing.
batch_size (int): Number of datapoints yielded per batch
epochs (int/None): maximum number of epochs after which a `StopIteration`
is raised (None for infinite generator)
        shuffle (bool): Whether to shuffle at the onset of each epoch
# Yields
batch (tuple): tuple corresponding to the `data` tuple that contains a
slice of length `batch_size` (except possibly on last batch of epoch)
# Example
using `keras.models.model`
>>> model.fit_generator(EpochIterator(np.array(zip(data, labels)))
# Note
designed for use with keras `model.fit_generator`
'''
def __init__(self, data, batch_size=1, epochs=None, shuffle=True):
self.data = TensorList(data)
self.epochs = epochs or np.inf
self.batch_size = batch_size
self.shuffle = shuffle
# Initialise counters
self.reset()
def __iter__(self):
return self
def next(self):
# At the end of an epoch, raise Stopiteration, or reset counter
if self.i >= len(self.data):
if self.epoch >= self.epochs:
raise StopIteration
else:
self.i = 0
self.epoch += 1
# At the begin of an epoch, shuffle the order of the data
if self.i==0 and self.shuffle:
np.random.shuffle(self.indices)
# Get the indices for this batch, and update counter i
use_inds = self.indices[self.i:self.i+self.batch_size]
self.i += len(use_inds)
        # Return as tuple
        return tuple(self.data[use_inds])
    # Python 3's iterator protocol calls __next__; alias it to next
    __next__ = next
def reset(self):
''' Resets the counters of the iterator
'''
self.i = 0
self.epoch = 1
        self.indices = np.arange(len(self.data))  # ndarray, so np.random.shuffle can shuffle in place
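# Usage sketch in the keras style mentioned above (names illustrative):
#
#     it = EpochIterator((x_data, y_data), batch_size=32, epochs=10)
#     x_batch, y_batch = next(it)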
def unit_tests_sparse_tensor(seed=None):
np.random.seed(seed)
arr = np.random.randint(3, size=(2000,30,5,8))
sparse = SparseTensor.from_array(arr)
singleton_shape = arr.shape[1:]
full_shape = (None,) + singleton_shape
print('Testing: `as_array` should return same as input to `from_array`')
assert np.all(sparse.as_array(full_shape) == arr)
print('Testing: Integer indexing should be identical to numpy')
assert np.all(sparse[0].as_array(singleton_shape) == arr[0])
print('Testing: Negative integer indexing should be identical to numpy')
assert np.all(sparse[len(sparse)-1].as_array(singleton_shape) == sparse[-1].as_array(singleton_shape) )
print('Testing: List indexing should be identical to numpy')
get_inds = [2,-1,3,6,0,0,1]
assert np.all(sparse[get_inds].as_array(full_shape) == arr[get_inds])
print('Testing: Slice indexing should be identical to numpy')
assert np.all(sparse[::-1].as_array(full_shape) == arr[::-1])
print('Testing: Various indexing testcases that should return same array as sparse')
assert np.all(sparse.as_array(full_shape) == sparse[:].as_array(full_shape))
assert np.all(sparse.as_array(full_shape) == sparse[0:len(sparse)+10].as_array(full_shape))
print('Testing: Equality functions return `True` for all entries when comparing sparse with sparse')
assert np.all(sparse == sparse.as_array(full_shape))
# assert np.all(sparse.as_array(full_shape) == sparse)
print('Testing: Equality functions return `True` for all entries when comparing sparse with original array')
assert np.all(arr == sparse.as_array(full_shape))
# assert np.all(sparse.as_array(full_shape) == arr)
print('Testing: Equality functions should return same boolean array as numpy')
assert np.all((arr[0] == 0) == (sparse[0] == 0))
assert np.all((arr[0] == arr[3]) == (sparse[0] == sparse[3]))
print('Testing: Inequality functions return `False` for all entries when comparing sparse with sparse')
assert not np.all(sparse != sparse.as_array(full_shape))
# assert not np.all(sparse.as_array(full_shape) != sparse)
print('Testing: Inequality functions return `False` for all entries when comparing sparse with original array')
assert not np.all(arr != sparse.as_array(full_shape))
assert not np.all(sparse.as_array(full_shape) != arr)
    print('Testing: Inequality functions should return same boolean array as numpy')
assert np.all((arr[0] != 0) == (sparse[0] != 0))
assert np.all((arr[0] != arr[3]) == (sparse[0] != sparse[3]))
print('Testing: `repr` can reproduce sparse')
assert np.all(eval(repr(sparse)) == sparse)
print('Testing: `from_config` can reproduce `sparse.to_config`')
assert np.all(SparseTensor.from_config(sparse.to_config(False)) == sparse)
assert np.all(SparseTensor.from_config(sparse.to_config(True)) == sparse)
print('Testing: unpickled pickles object reproduces itself')
assert np.all(pkl.loads(pkl.dumps(sparse)) == sparse)
assert np.all(pkl.loads(pkl.dumps(sparse)) == sparse.as_array())
def unit_tests_graph_tensor(seed=None):
np.random.seed(seed)
# Parameters for generative model
num_molecules=50
max_atoms = 40
max_degree = 6
num_atom_features = 62
num_bond_features = 8
# Generate/simulate graph tensors
atoms = np.zeros((num_molecules, max_atoms, num_atom_features))
bonds = np.zeros((num_molecules, max_atoms, max_degree, num_bond_features))
edges = np.zeros((num_molecules, max_atoms, max_degree)) -1
# Generate atoms for each molecule
for i, n_atoms in enumerate(np.random.randint(1, max_atoms, size=num_molecules)):
atoms[i, 0:n_atoms, :] = np.random.randint(3, size=(n_atoms, num_atom_features))
# Generator neighbours/bonds for each atom
for a, degree in enumerate(np.random.randint(max_degree, size=n_atoms)):
bonds[i, a, 0:degree, :] = np.random.randint(3, size=(degree, num_bond_features))
edges[i, a, 0:degree] = np.random.randint(max_degree, size=degree)
mols = GraphTensor([atoms, bonds, edges], sparse_max_atoms=True,
sparse_max_degree=True)
max_atoms_sizes = set([])
max_degree_sizes = set([])
num_atom_features_sizes = set([])
num_bond_features_sizes = set([])
num_molecules_sizes = set([])
for i in range(len(mols)):
# This asserts the shapes match within the tensors
(max_atoms, max_degree, num_atom_features, num_bond_features,
num_molecules) = mol_shapes_to_dims(mols[i])
max_atoms_sizes.add(max_atoms)
max_degree_sizes.add(max_degree)
num_atom_features_sizes.add(num_atom_features)
num_bond_features_sizes.add(num_bond_features)
num_molecules_sizes.add(num_molecules)
print('Testing: max_atoms is varying in size')
assert len(max_atoms_sizes) > 1
print('Testing: max_degree is varying in size')
assert len(max_degree_sizes) > 1
print('Testing: num_atom_features is constant in size')
assert len(num_atom_features_sizes) == 1
print('Testing: num_bond_features is constant in size')
assert len(num_bond_features_sizes) == 1
print('Testing: num_molecules is constant in size')
assert len(num_molecules_sizes) == 1
def unit_test_epoch_iterator(seed=None):
np.random.seed(seed)
n_datapoints = 50
batch_size = 13
epochs = 100
    x_data = np.random.rand(n_datapoints, 3, 6, 2)
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light,md
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # Selection on Observables
# ## Purpose
# The purpose of this notebook is to illustrate an example of the workflow outlined in [Brathwaite and Walker (2017)](https://arxiv.org/abs/1706.07502). This simple application aims at highlighting the importance of causal structure in estimating causal effects of interest reflecting changes resulting from policy proposals. The basic idea is to show that when we control for intermediate variables of some variable of interest in a causal graph, we never recover the true causal parameter on the variable of interest.
#
# This notebook uses the dataset and the MNL utility specification from [Brathwaite and Walker (2016)](https://arxiv.org/abs/1606.05900) for demonstration.
# The rest of the notebook is organized as follows:
# - Defining different causal graphs representing different views for how individuals make mode choices. These causal graphs are based on the MNL utility functions from [Brathwaite and Walker (2016)](https://arxiv.org/abs/1606.05900)
# - Simulating data based on the different beliefs about the data generating process illustrated by both causal graphs.
# - Perturbing one of the variables (e.g.: Travel Distance) to simulate a policy intervention.
# - Calculating and plotting the distributions of treatment effects according to different causal graphs.
# # Import Needed Libraries
# +
# Built-in libraries
import copy
import sys
from collections import defaultdict
from collections import OrderedDict
from functools import reduce
import causal2020.observables.availability as av
import causal2020.observables.distfit as distfit
import causal2020.observables.regression as reg
import causal2020.observables.simulation as sim
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pylogit as pl
import seaborn as sns
from causal2020.observables.graphs import BIKE_UTILITY
from causal2020.observables.graphs import DA_UTILITY
from causal2020.observables.graphs import DTW_UTILITY
from causal2020.observables.graphs import IND_UTILITY
from causal2020.observables.graphs import SHARED_2_UTILITY
from causal2020.observables.graphs import SHARED_3P_UTILITY
from causal2020.observables.graphs import WALK_UTILITY
from causal2020.observables.graphs import WTD_UTILITY
from causal2020.observables.graphs import WTW_UTILITY
from causal2020.observables.utils import is_notebook
from causalgraphicalmodels import CausalGraphicalModel
from checkrs.utils import simulate_choice_vector
from pyprojroot import here
# Third party libraries
# Local libraries
# -
# # Set Notebook Parameters
# +
SEED = 197
np.random.seed(SEED)
# -*- coding: utf-8 -*-
import math
import numpy as np
# to build kernel params use the default values:
# p1Candidates = {'Linear', 'PolyPlus', 'Polynomial', 'Gaussian'};
# p2Candidates = {[], [2, 4], [2, 4], [0.01, 0.05, 0.1, 1, 10, 50, 100]};
# p3Candidates = {'Sample-Scale'};
def get_GLSPFS_kernel_options():
options = []
p1_candidates = ["Linear", "PolyPlus", "Polynomial", "Gaussian"]
p2_candidates = [[], [2, 4], [2, 4], [0.01, 0.05, 0.1, 1, 10, 50, 100]]
p3_candidates = ['Sample-Scale']
n2 = np.zeros([len(p1_candidates), 1])
for i2 in range(len(p1_candidates)):
n2[i2, 0] = max(1, len(p2_candidates[i2]))
    for i1 in range(len(p1_candidates)):
        # start at 0: the original range(1, ...) was a leftover from 1-indexed
        # MATLAB code and silently skipped the first parameter candidate
        for i2 in range(max(int(n2[i1, 0]), 1)):
            for i3 in range(len(p3_candidates)):
                tmp = p2_candidates[i1]
                if p1_candidates[i1] == 'Gaussian':
                    options.append(
                        {
                            "KernelType": p1_candidates[i1],
                            "normType": p3_candidates[i3],
                            "t": tmp[i2]
                        }
                    )
                elif p1_candidates[i1] == 'Polynomial' or p1_candidates[i1] == 'PolyPlus':
                    options.append(
                        {
                            "KernelType": p1_candidates[i1],
                            "normType": p3_candidates[i3],
                            "d": tmp[i2]
                        }
                    )
                else:
                    # 'Linear' takes no extra kernel parameter
                    options.append(
                        {
                            "KernelType": p1_candidates[i1],
                            "normType": p3_candidates[i3]
                        }
                    )
    return options
# build GLSPFS params
# local_type_candi = {'LPP', 'LLE', 'LTSA'};
# knn_size_candi = 5;
# lambda1_candi = 10.^[-3:0];
# lambda2_candi = 10.^[-3:0];
def get_GLSPFS_params():
individuals = []
kernelOptions = get_GLSPFS_kernel_options()
local_type_candi = ["LPP", "LLE", "LTSA"]
knn_size_candi = [5, 8, 10]
lambda1_candi = [math.pow(10, x) for x in range(-5, 5)]
lambda2_candi = [math.pow(10, x) for x in range(-5, 5)]
for i1 in range(len(local_type_candi)):
for i3 in range(len(knn_size_candi)):
for i4 in range(len(lambda1_candi)):
for i5 in range(len(lambda2_candi)):
for i6 in range(len(kernelOptions)):
individuals.append(
{
"local_type": local_type_candi[i1],
"local_lpp_sigma": [],
"local_ltsa_embedded_dim": [],
"local_k": knn_size_candi[i3],
"lambda1": lambda1_candi[i4],
"lambda2": lambda1_candi[i5],
"global_kernel_option": kernelOptions[i6]
}
)
return np.array(individuals)
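# Quick sanity sketch for the grids above (sizes follow the cross products):
#
#     kernel_opts = get_GLSPFS_kernel_options()
#     params = get_GLSPFS_params()
#     # len(params) == len(local_type_candi) * len(knn_size_candi)
#     #             * len(lambda1_candi) * len(lambda2_candi) * len(kernel_opts)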
# iDetect parameters ####
# it = 20;
# distance = {'euclidean','block'};
# sigma = 10.^[-5:0];
# lambda = 10.^[1:3];
# new_lambda = 2.^[1:12];
def get_iDetect_params(default_configs):
if default_configs:
return np.array([{
"distance": "euclidean",
"it": 30,
"sigma": 1,
"lambda": 2
}])
individuals = []
distance = ["euclidean", "block"]
it = range(10, 50, 10)
sigma = [math.pow(10, x) for x in range(-8, 8)]
lamb = [math.pow(10, x) for x in range(1, 20)]
for i1 in range(len(distance)):
for i2 in range(len(it)):
for i3 in range(len(sigma)):
for i4 in range(len(lamb)):
individuals.append(
{
"distance": distance[i1],
"it": it[i2],
"sigma": sigma[i3],
"lambda": lamb[i4]
}
)
return np.array(individuals)
def get_LS_params(default_configs):
if default_configs:
return np.array([{
"mode_metric": ["euclidean"],
"mode_weight": ["heatKernel"],
"neighborMode": "knn",
"k": 5,
"t": 1
}])
individuals = []
mode_metric = ["euclidean", "cosine"]
mode_weight = ["binary", "heatKernel", "cosine"]
k = range(3, 20, 2)
t = range(1, 10, 2)
for i1 in range(len(mode_metric)):
for i2 in range(len(mode_weight)):
if (mode_metric[i1] == "euclidean" and mode_weight[i2] == "cosine")\
or (mode_metric[i1] == "cosine" and mode_weight[i2] == "heatKernel"):
continue
for i3 in range(len(k)):
for i4 in range(len(t)):
individuals.append(
{
"mode_metric": mode_metric[i1],
"mode_weight": mode_weight[i2],
"neighborMode": "knn",
"k": k[i3],
"t": t[i4]
}
)
return np.array(individuals)
def get_SPEC_params(objs, default_configs):
if default_configs:
return np.array([{
"mode_metric": ["euclidean"],
"mode_weight": ["heatKernel"],
"neighborMode": "knn",
"k": 5,
"t": 1,
"style": -1
}])
individuals = []
mode_metric = ["euclidean", "cosine"]
mode_weight = ["binary", "heatKernel", "cosine"]
k = range(3, 20, 2)
t = range(1, 10, 2)
style = range(-1, objs, 2)
for i1 in range(len(mode_metric)):
for i2 in range(len(mode_weight)):
if (mode_metric[i1] == "euclidean" and mode_weight[i2] == "cosine")\
or (mode_metric[i1] == "cosine" and mode_weight[i2] == "heatKernel"):
continue
for i3 in range(len(k)):
for i4 in range(len(t)):
for i5 in range(len(style)):
individuals.append(
{
"mode_metric": mode_metric[i1],
"mode_weight": mode_weight[i2],
"neighborMode": "knn",
"k": k[i3],
"t": t[i4],
"style": style[i5]
}
)
    return np.array(individuals)
def np_equal_considering_nans(a, b):
'''
Are two nparrays equal, except when both a NaN?
Arguments
---------
a : array_like
b : array_like
Returns
-------
boolean
'''
import numpy as np
a_is_nan = np.isnan(a)
b_is_nan = np.isnan(b)
    if np.array_equal(a_is_nan, b_is_nan):
        # NaN masks agree; compare only the finite entries
        # (minimal completion of the truncated original body)
        return np.array_equal(a[~a_is_nan], b[~b_is_nan])
    return False
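# Quick check of the intended behaviour:
#
#     np_equal_considering_nans(np.array([1.0, np.nan]),
#                               np.array([1.0, np.nan]))   # -> True
#     np_equal_considering_nans(np.array([1.0, np.nan]),
#                               np.array([np.nan, 1.0]))   # -> False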
# -*- coding: utf-8 -*-
import six
import numpy as np
import pytest
import sksurgerycore.algorithms.procrustes as p
def test_empty_fixed():
with pytest.raises(TypeError):
p.orthogonal_procrustes(None, np.ones((1, 3)))
def test_empty_moving():
with pytest.raises(TypeError):
p.orthogonal_procrustes(np.ones((1, 3)), None)
def test_three_columns_fixed():
with pytest.raises(ValueError):
p.orthogonal_procrustes(np.ones((3, 4)), np.ones((3, 3)))
def test_three_columns_moving():
with pytest.raises(ValueError):
p.orthogonal_procrustes(np.ones((3, 3)), np.ones((3, 4)))
def test_at_least_three_points_fixed():
with pytest.raises(ValueError):
p.orthogonal_procrustes(np.ones((1, 3)), np.ones((3, 3)))
def test_at_least_three_points_moving():
with pytest.raises(ValueError):
p.orthogonal_procrustes(np.ones((3, 3)), np.ones((1, 3)))
def test_same_number_points():
with pytest.raises(ValueError):
p.orthogonal_procrustes(np.ones((4, 3)), np.ones((5, 3)))
def test_identity_result():
fixed = np.zeros((3, 3))
fixed[0][1] = 1
fixed[2][0] = 2
moving = np.zeros((3, 3))
moving[0][1] = 1
moving[2][0] = 2
rotation, translation, error = p.orthogonal_procrustes(fixed, moving)
assert np.allclose(rotation, np.eye(3), 0.0000001)
assert np.allclose(translation, np.zeros((3, 1)), 0.0000001)
assert error < 0.0000001
def test_reflection_data():
"""This seems to be testing that a rotation
is prefered to a reflection. To transform these
points you can either reflect through yz, or
rotate 180 about y"""
fixed = np.zeros((4, 3))
fixed[0][1] = 1
fixed[2][0] = 2
fixed[3][0] = 4
moving = np.zeros((4, 3))
moving[0][1] = 1
moving[2][0] = -2
moving[3][0] = -4
rotation, translation, error = p.orthogonal_procrustes(fixed, moving)
expected_rotation = np.eye(3)
expected_rotation[0][0] = -1
expected_rotation[2][2] = -1
    print(np.linalg.det(rotation))
    print(rotation)
assert np.allclose(rotation, expected_rotation, 0.0000001)
assert np.allclose(translation, np.zeros((3, 1)), 0.0000001)
assert error < 0.0000001
def test_reflection_BARD_data():
"""This is a reflection test using data taken from using
BARD at the 2019 summer school. This data set will give
you reflections, unless you replace equation 13 from Arun
with Fitzpatrick's modification. This was done at issue #19"""
ct_fids = np.zeros((4,3))
    world_fids = np.zeros((4,3))
# -*- coding: utf-8 -*-
"""Console script for exo."""
import errno
import math
import sys
import click
import numpy as np
# Adapted Java treeview image compression algorithm
def rebin(a, new_shape):
M, N = a.shape
m, n = new_shape
if m >= M:
# repeat rows in data matrix
a = np.repeat(a, math.ceil(float(m) / M), axis=0)
M, N = a.shape
m, n = new_shape
row_delete_num = M % m
col_delete_num = N % n
np.random.seed(seed=0)
if row_delete_num > 0:
# select deleted rows with equal intervals
row_delete = np.linspace(0, M - 1, num=row_delete_num, dtype=int)
            # sort the selected deleted row ids (np.linspace output is already ordered)
row_delete = np.sort(row_delete)
row_delete_plus1 = row_delete[1:-1] + \
1 # get deleted rows plus position
# get deleted rows plus position (top +1; end -1)
row_delete_plus1 = np.append(
np.append(row_delete[0] + 1, row_delete_plus1), row_delete[-1] - 1)
# put the info of deleted rows into the next rows by mean
a[row_delete_plus1, :] = (
a[row_delete, :] + a[row_delete_plus1, :]) / 2
a = np.delete(a, row_delete, axis=0) # random remove rows
if col_delete_num > 0:
# select deleted cols with equal intervals
col_delete = np.linspace(0, N - 1, num=col_delete_num, dtype=int)
            # sort the selected deleted col ids (np.linspace output is already ordered)
col_delete = np.sort(col_delete)
col_delete_plus1 = col_delete[1:-1] + \
1 # get deleted cols plus position
# get deleted cols plus position (top +1; end -1)
col_delete_plus1 = np.append(
np.append(col_delete[0] + 1, col_delete_plus1), col_delete[-1] - 1)
# put the info of deleted cols into the next cols by mean
a[:, col_delete_plus1] = (
a[:, col_delete] + a[:, col_delete_plus1]) / 2
            a = np.delete(a, col_delete, axis=1)
import matplotlib
matplotlib.use('Agg')
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import scipy.io
import sunpy.time
import pickle
import seaborn as sns
import shutil
import datetime
from astropy.time import Time
import pdb
from sympy.solvers import solve
from sympy import Symbol
import multiprocessing
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
import glob
import h5py
import astropy.units as u
# ###################################################### functions
def getShockNormalAngle(pos, longell, rell, timeind, frameTime, ArrTime, plotLines):
# print('TI: ', mdates.num2date(frameTime))
# print('AT: ', mdates.num2date(ArrTime))
# print('Tdiff [min]: ', TimeDiff)
LonEarth = pos.earth[1, timeind]# + 0.55
# print('LonEll: ', longell)
# print('lonEarth: ', LonEarth)
minDiffLonEll = min(abs(longell-LonEarth))
indMinLon = np.where(abs(longell-LonEarth) == minDiffLonEll)[0]
EarthHit = False
if indMinLon < np.size(longell)-1 and indMinLon > 0:
EarthHit = True
TimeDiff = 100
# if ArrTime != b' -1':
if ArrTime != float('Nan'):
TimeDiff = abs(frameTime - ArrTime)*60*24
if EarthHit and TimeDiff < 30:
REarth = pos.earth[0, timeind]
# plt.plot([0, LonEarth], [0, REarth], color='pink', lw=0.8, alpha=1)
#if plotLines:
# plt.scatter(longell[indMinLon-1], rell[indMinLon-1], s=2)
# plt.scatter(longell[indMinLon+1], rell[indMinLon+1], s=2)
x = rell[indMinLon]*np.cos(longell[indMinLon])
y = rell[indMinLon]*np.sin(longell[indMinLon])
x = REarth*np.cos(LonEarth)
y = REarth*np.sin(LonEarth)
x1 = rell[indMinLon-1]*np.cos(longell[indMinLon-1])
x2 = rell[indMinLon+1]*np.cos(longell[indMinLon+1])
y1 = rell[indMinLon-1]*np.sin(longell[indMinLon-1])
y2 = rell[indMinLon+1]*np.sin(longell[indMinLon+1])
k = (y1-y2)/(x1-x2)
d = y1-k*x1
    # normal line: slope = -1/k
fact = 1
#if x[ind] < 0:
# fact = -1
kNew = -1/k
dNew = y-kNew*x
dCent = 0
kCent = y/x
alpha = np.arctan(kCent)
# print('kCent [°]: ', np.rad2deg(alpha))
# alpha = arctan(abs((m1-m2)/(1+m1*m2)))
angleDiff = np.arctan((kNew-kCent)/(1+kNew*kCent))
angleDiffDeg = np.rad2deg(angleDiff)
alpha = np.arctan(kNew)
# print('kNew [°]: ', np.rad2deg(alpha))
dist = 0.2
#print('x: ', x)
#print('y: ', y)
#print('rell: ', rell[indMinLon])
#print('longell: ', longell[indMinLon])
tmpXN = dist*np.cos(alpha) + x
tmpYN = dist*np.sin(alpha) + y
rellNew = np.sqrt(tmpXN ** 2 + tmpYN ** 2)
longellNew = np.arctan2(tmpYN, tmpXN)
r1 = np.sqrt(x1 ** 2 + y1 ** 2)
l1 = np.arctan2(y1, x1)
r2 = np.sqrt(x2 ** 2 + y2 ** 2)
l2 = np.arctan2(y2, x2)
# if plotLines:
# plt.plot([LonEarth, longellNew], [REarth, rellNew], color='black', lw=0.3, alpha=1)
# print('angle Diff [°]= ', angleDiffDeg)
return angleDiffDeg[0]
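# Worked example for the angle formula above (assumed values): for a front
# tangent of slope k = 1 the normal has slope kNew = -1; with a radial
# direction of slope kCent = 0 the signed angle between the two lines is
# arctan((kNew - kCent) / (1 + kNew*kCent)) = arctan(-1) = -45 deg.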
def plot_bgsw_speed(time, speed, angle, label, vmin, vmax, plotPath): #arr = np.array(np.size(time_b), max(speed_b) - min(speed_b))
ysize = np.int(max(speed) - min(speed))
xsize = np.size(time)
arr = np.zeros(shape=(xsize, ysize))
for i in np.arange(0, xsize):
arr[i,:] = speed[i]
elons = np.zeros(xsize)
for i in np.arange(0, np.size(elons)):
elons[i] = i +1
fig = plt.figure(figsize=(16, 5))
ax1 = fig.add_subplot(111)
ax1.grid(b = None, axis='both')
#cf = ax1.imshow(arr.T, cmap=plt.cm.get_cmap('rainbow'), vmin=vmin, vmax=vmax, aspect = (xsize / ysize), origin='lower')
cf = ax1.imshow(arr.T, cmap=plt.cm.get_cmap('coolwarm'), vmin=vmin, vmax=vmax, aspect = (xsize / ysize), origin='lower')
#ax = plt.axes()
plt.yticks([])
plt.xticks(np.arange(xsize), time, rotation = 45)
ax1.xaxis.set_major_locator(plt.MaxNLocator(np.int(xsize/8)))
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax2.grid(b = None, axis='both')
ax2.set_ylabel('Elongation [°]') # we already handled the x-label with ax1
ax2.plot(time, np.rad2deg(angle), 'black')
ax2.yaxis.set_ticks_position('left')
ax2.yaxis.set_label_position('left')
ax2.xaxis.set_major_locator(plt.MaxNLocator(np.int(xsize/8)))
ax2.legend([label], handlelength=0, handletextpad=0, loc='upper left')
cax = plt.axes([-0.01, 0.125, 0.02, 0.75])
cbar = plt.colorbar(cf, cax=cax, ticks=np.arange(vmin, vmax, 50))
cbar.set_label('Solar wind speed [km/s]')
plt.savefig(plotPath + 'BGSW_' + label + '.png', dpi=300, bbox_inches='tight')
# clears plot window
plt.clf()
def plot_BGSW_tangent(path):
######################################################
######################################################
# FOR a nicer plot see 'PlotAmbientSolarWinds.ipynb' #
######################################################
######################################################
#path = 'HI_animate/events/test/20100203_AB/'
[tpWind_a, tpWind_b, et_time_a, et_time_b, angle_a, angle_b, tp_a, tp_b] = pickle.load(
open(path + 'tpWind_AB.p', "rb"))
#[tpWind_a, et_time_a] = pickle.load(
# open('HI_animate/events/test/20100203_A/tpWind_A.p', "rb"))
fig = plt.figure(figsize=(16, 8))
time_a = []
speed_a = []
for i in np.arange(0, np.int(np.size(tpWind_a)/2)):
#print((tpWind_a[i][0])[0:19])
time_a.append((tpWind_a[i][0])[0:19])
speed_a.append(tpWind_a[i][1])
time_b = []
speed_b = []
for i in np.arange(0, np.int(np.size(tpWind_b)/2)):
time_b.append((tpWind_b[i][0])[0:19])
speed_b.append(tpWind_b[i][1])
#x = time_a
x = mdates.date2num(Time.strptime(time_a, '%Y-%m-%d %H:%M:%S').datetime)
x = x - x.min()
y = np.arange(0, len(x), 1)
y = np.array(np.rad2deg(angle_a))
speeds = np.array(speed_a)
    ymin = 0
    ymax = np.round(np.nanmax([np.rad2deg(angle_a), np.rad2deg(angle_b)]),-1)+10
    # vmin/vmax were used below but never defined (NameError); deriving them
    # from the two speed series is an assumption for the colormap range
    vmin = np.nanmin(speed_a + speed_b)
    vmax = np.nanmax(speed_a + speed_b)
# Create a set of line segments so that we can color them individually
# This creates the points as a N x 1 x 2 array so that we can stack points
# together easily to get the segments. The segments array for line collection
# needs to be (numlines) x (points per line) x 2 (for x and y)
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
plt.rcParams.update({'font.size': 21})
fig, axs = plt.subplots(2, 1, sharex=True, sharey=True, figsize=[16,10])
# Create a continuous norm to map from data points to colors
norm = plt.Normalize(vmin, vmax)
lc = LineCollection(segments, cmap='coolwarm', norm=norm)
# Set the values used for colormapping
lc.set_array(speeds)
lc.set_linewidth(7)
line = axs[0].add_collection(lc)
#fig.colorbar(line, ax=axs[0])
axs[0].set_xlim(x.min(), x.max())
axs[0].set_ylim(ymin, ymax)
axs[0].set_ylabel('Elongation [°]')
#x = time_a
x = mdates.date2num(Time.strptime(time_b, '%Y-%m-%d %H:%M:%S').datetime)
x = x - x.min()
y = np.array(np.rad2deg(angle_b))
speeds = np.array(speed_b)
# Create a set of line segments so that we can color them individually
# This creates the points as a N x 1 x 2 array so that we can stack points
# together easily to get the segments. The segments array for line collection
# needs to be (numlines) x (points per line) x 2 (for x and y)
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
norm = plt.Normalize(vmin, vmax)
lc = LineCollection(segments, cmap='coolwarm', norm=norm)
# Set the values used for colormapping
lc.set_array(speeds)
lc.set_linewidth(7)
line = axs[1].add_collection(lc)
axs[1].set_xlim(x.min(), x.max())
axs[1].set_ylim(ymin, ymax)
plt.yticks(np.arange(ymin, ymax, 20.0))
#plt.xticks(np.arange(x.min(), x.max(), 0.083))
plt.xticks(x[0::12], time_a[0::12])
axs[1].set_ylabel('Elongation [°]')
plt.setp(axs[1].xaxis.get_majorticklabels(), rotation=25)
#fig.text(0.02, 0.5, 'Elongation [°]', ha='center', va='center', rotation='vertical')
cax = plt.axes([0.92, 0.125, 0.015, 0.755])
cbar = plt.colorbar(line, cax=cax, ticks=np.arange(vmin, vmax, 40))
cbar.set_label('Solar wind speed [km/s]')
axs[0].text(0.2, ymax-5, 'a)', fontsize=28, ha='center', va='top', wrap=True)
axs[1].text(0.2, ymax-5, 'b)', fontsize=28, ha='center', va='top', wrap=True)
fig.savefig(path + '/BGSW_elon.png',
bbox_inches="tight")
fig.clf()
plt.close('all')
print('done')
# ###################################################### functions
# for reading catalogues
def getcat(filename):
print('reading CAT ' + filename)
cat = scipy.io.readsav(filename) # , verbose='false')
print('done reading CAT')
return cat
def decode_array(bytearrin):
# for decoding the strings from the IDL .sav file to a list of python
# strings, not bytes make list of python lists with arbitrary length
bytearrout = ['' for x in range(len(bytearrin))]
for i in range(0, len(bytearrin) - 1):
bytearrout[i] = bytearrin[i].decode()
# has to be np array so to be used with numpy "where"
bytearrout = np.array(bytearrout)
return bytearrout
def time_to_num_cat(time_in):
# for time conversion from catalogue .sav to numerical time
# this for 1-minute data or lower time resolution
# for all catalogues
# time_in is the time in format: 2007-11-17T07:20:00 or 2007-11-17T07:20Z
# for times help see:
# http://docs.sunpy.org/en/latest/guide/time.html
# http://matplotlib.org/examples/pylab_examples/date_demo2.html
j = 0
# time_str=np.empty(np.size(time_in),dtype='S19')
time_str = ['' for x in range(len(time_in))]
# =np.chararray(np.size(time_in),itemsize=19)
time_num = np.zeros(np.size(time_in))
for i in time_in:
# convert from bytes (output of scipy.readsav) to string
time_str[j] = time_in[j][0:16].decode() + ':00'
year = int(time_str[j][0:4])
time_str[j]
# convert time to sunpy friendly time and to matplotlibdatetime
# only for valid times so 9999 in year is not converted
# pdb.set_trace()
if year < 2100:
time_num[j] = mdates.date2num(Time.strptime(time_str[j], '%Y-%m-%dT%H:%M:%S').datetime)
j = j + 1
# the date format in matplotlib is e.g. 735202.67569444
# this is time in days since 0001-01-01 UTC, plus 1.
# return time_num which is already an array and convert the list of strings
# to an array
return time_num, np.array(time_str)
def roundTime(dt=None, roundTo=60):
# Round a datetime object to any time lapse in seconds
# dt : datetime.datetime object, default now.
# roundTo : Closest number of seconds to round to, default 1 minute.
# Author: <NAME> 2012 - Use it as you want but don't blame me.
if dt is None:
dt = datetime.datetime.now()
seconds = (dt.replace(tzinfo=None) - dt.min).seconds
rounding = (seconds + roundTo / 2) // roundTo * roundTo
return dt + datetime.timedelta(0, rounding - seconds, -dt.microsecond)
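# Example (derived from the implementation above):
#
#     roundTime(datetime.datetime(2012, 12, 31, 23, 44, 59), roundTo=3600)
#     # -> datetime.datetime(2013, 1, 1, 0, 0)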
def getTangentPoint(a, b, xc, yc, px, py, elon, sc, plot):
tilt = 90
pxOri = px
pyOri = py
px = px - xc
py = py - yc
ti = np.deg2rad(elon)
pxRot = px*np.cos(ti) - py*np.sin(ti)
pyRot = px*np.sin(ti) + py*np.cos(ti)
px = pxRot
py = pyRot
ellipseResolution = 211
circ_ang = ((np.arange(ellipseResolution) * 2 - (ellipseResolution-1)) * np.pi / 180)
xe = b * np.cos(circ_ang) # Parameterized equation of ellipse
ye = a * np.sin(circ_ang)
cosang = np.cos(tilt * np.pi / 180)
sinang = np.sin(tilt * np.pi / 180)
xell = xe * cosang - ye * sinang # Rotate to desired
# position angle
yell = xe * sinang + ye * cosang
if py != 0:
xSolve = Symbol('xSolve')
xSol = solve(b**2*xSolve**2 + a**2*((a**2*b**2-b**2*xSolve*px)/(a**2*py))**2-a**2*b**2, xSolve)
#print(xSol)
xs = []
for xst in xSol:
xs.append(float(xst))
#print(xs)
xs =[np.max(xs)]
ys = []
ytmp = Symbol('ytmp')
for xtmp in xs:
tmp = solve((b**2*xtmp**2 + a**2*ytmp**2 - a**2*b**2))
ys.append(tmp)
if sc == 'A':
if np.max(xell) < px:
ys = np.min(ys)
else:
ys = np.max(ys)
if sc == 'B':
if np.max(xell) < px:
ys = np.max(ys)
else:
ys = np.min(ys)
xt1 = 0
xt2 = 0
yt1 = 0
yt2 = 0
if plot == 1:
#d = (py - k * px)
k = Symbol('k')
kSol = solve(a**2*k**2+b**2-(py-k*px)**2, k)
#print('kSol =', kSol)
k1 = float(kSol[0])
d1 = (py - k1*px)
k2 = float(kSol[1])
d2 = (py - k2*px)
#y = (k*x + d)
xtest = np.arange(-1, 1, 0.005)
plt.gca().set_aspect('equal', adjustable='box')
plt.xlim(-1,1)
plt.ylim(-1,1)
plt.plot(xell, yell)
plt.scatter(px, py, s=10, c='red')
for i in np.arange(0, np.size(xtest)):
plt.scatter(xtest[i], k1*xtest[i] + d1, s=1, c='red')
plt.scatter(xtest[i], k2*xtest[i] + d2, s=1, c='red')
for i in np.arange(0, np.size(xs)):
plt.scatter(xs[i], ys, s=10, c='black')
plt.xlim(-1,1)
plt.ylim(-1,1)
#ytest2 = k2*xtest + d2
#ytest1 = k1*xtest + d1
#xt1 = xtest*np.cos(ti) - ytest1*np.sin(ti) + xc
#yt1 = xtest*np.sin(ti) + ytest1*np.cos(ti) + yc
#xt2 = xtest*np.cos(ti) - ytest2*np.sin(ti) + xc
#yt2 = xtest*np.sin(ti) + ytest2*np.cos(ti) + yc
points = []
ti = -ti
for i in np.arange(0, np.size(xs)):
xRot = xs[i]*np.cos(ti)-ys*np.sin(ti) + xc
yRot = xs[i]*np.sin(ti)+ys*np.cos(ti) + yc
points.append([xRot, yRot])
a = np.array(np.float64(points[0]))
b = np.array([pxOri, pyOri])
c = np.array([0,0])
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
return points, xt1, yt1, xt2, yt2, angle
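# Note on the tangency condition solved above: for the ellipse
# b^2*x^2 + a^2*y^2 = a^2*b^2, the polar line of the external point (px, py)
# is b^2*x*px + a^2*y*py = a^2*b^2; substituting y from the polar line into
# the ellipse equation yields exactly the quadratic in `xSolve`.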
def plot_ellipse(ax, dayjump, pos, timeind, cmeind, k, all_apex_f, all_apex_w,
all_apex_r, all_apex_lon, all_apex_s, all_apex_flag,
frame_time_num, et_time_num_interp, et_elon_interp,
et_time_num, startcutFit, endcutFit, tangentPoints, frontData, ELEvoHIResults, shockAngles, diffMeasure):
# Plot all the different ellipses (different runs for each time step) with
# the elongation profile
r_sun = 695700.
au = 149597870.
slope = np.NaN
intercept = np.NaN
angle = np.NaN
stopAnimation = False
# ############################## plot all active CME ellipses
if np.size(cmeind) > 0:
# print(np.size(cmeind))
indizes = np.arange(0, np.size(cmeind))
indizes = np.roll(indizes, -1)
for p in indizes:
# for p in range(0, np.size(cmeind)):
# if p == 0:
if True:
# print('CME active ',p)
# print('f: ', all_apex_f[cmeind[0][p]])
# print('lon: ', all_apex_lon[cmeind[0][p]])
# print('w: ', np.rad2deg(all_apex_w[cmeind[0][p]]))
# print('lon: ', all_apex_s[cmeind[0][p]], all_apex_lon[cmeind[0][p]])
# derive values for ellipse
phi = all_apex_lon[cmeind[0][p]]
lamb = np.rad2deg(all_apex_w[cmeind[0][p]])
f = all_apex_f[cmeind[0][p]]
# print('phi', phi)
# print('lamb', lamb)
# print('f', f)
LonE = pos.earth[1, timeind]
LonST = pos.sta[1, timeind]
if all_apex_s[cmeind[0][p]] == 'B':
LonST = pos.stb[1, timeind]
# print('diff: ', np.rad2deg(LonST - LonE) - phi, lamb, f)
# print(ELEvoHIResults[:,p])
theta = np.arctan((all_apex_f[cmeind[0][p]] ** 2) * np.tan(
all_apex_w[cmeind[0][p]]))
omega = np.sqrt((np.cos(theta) ** 2) * (
all_apex_f[cmeind[0][p]] ** 2 - 1) + 1)
# ellipse values, depending on R and lamda and f, from Moestl et
# al. 2015
# Nat. Comm.
b = (all_apex_r[cmeind[0][p]] * omega * np.sin(
all_apex_w[cmeind[0][p]])) / (np.cos(
all_apex_w[cmeind[0][p]] - theta) + omega * np.sin(
all_apex_w[cmeind[0][p]]))
a = b / all_apex_f[cmeind[0][p]]
c = all_apex_r[cmeind[0][p]] - b # center distance of ellipse
# ellipse apex and center
# [xapex,yapex]=np.array([np.cos(all_apex_lon[cmeind[0][p]]*np.pi/180),np.sin(all_apex_lon[cmeind[0][p]]*np.pi/180)])*all_apex_r[cmeind[0][p]]
[xc, yc] = np.array([np.cos(
all_apex_lon[cmeind[0][p]] * np.pi / 180),
np.sin(all_apex_lon[cmeind[0][p]] * np.pi / 180)]) * c
#ellipseResolution = 211
# circ_ang = ((np.arange(111) * 2 - 110) * np.pi / 180)
circ_ang = ((np.arange(101) * 2 - 100) * np.pi / 180)
# circ_ang1 = ((np.arange(0, 201, 0.5) * 2 - 100) * np.pi / 180)
# circ_ang1 = circ_ang1[0:201]
# circ_ang = circ_ang1
#circ_ang = ((np.arange(ellipseResolution) * 2 - (ellipseResolution-1)) * np.pi / 180
#circ_ang = (np.arange(-180, 180.25, 0.25) * np.pi / 180)
xe = b * np.cos(circ_ang) # Parameterized equation of ellipse
ye = a * np.sin(circ_ang)
# rotation angle
cosang = np.cos(all_apex_lon[cmeind[0][p]] * np.pi / 180)
# -np.deg2rad(90))
sinang = np.sin(all_apex_lon[cmeind[0][p]] * np.pi / 180)
# -np.deg2rad(90))
xell = xc + xe * cosang - ye * sinang # Rotate to desired
# position angle
yell = yc + xe * sinang + ye * cosang
#xell = xc + xe
#yell = yc + ye
rell = np.sqrt(xell ** 2 + yell ** 2)
longell = np.arctan2(yell, xell)
# print('r0: ', all_apex_r[cmeind[0][p]])
# print(rell)
frameTime = frame_time_num + k
#print('f: ', all_apex_f[cmeind[0][p]])
#print('min lon: ', np.min(longell))
#print('max lon: ', np.max(longell))
# plot in correct color
if all_apex_s[cmeind[0][p]] == 'A':
# make alpha dependent on distance to solar equatorial plane
# ax.plot(longell,rell, c='grey', alpha=1-abs(all_apex_lat[
# cmeind[0][p]]/50),
# lw=1.5)
#if True:
# ax.plot(longell, rell, c='red', alpha=1, lw=0.8)
#else:
# ax.plot(longell, rell, c='red', alpha=0.08, lw=1.5)
stx = pos.sta[0, timeind] * np.cos(pos.sta[1, timeind])
sty = pos.sta[0, timeind] * np.sin(pos.sta[1, timeind])
elon = all_apex_lon[cmeind[0][p]]
# pos.earth[1, timeind], pos.earth[0, timeind]
# shockAngle = getShockNormalAngle(pos, longell, rell, timeind, frameTime, ELEvoHIResults[:,p][3], False)
# print('SA: ', shockAngle)
# if shockAngle != None:
# shockAngles.append(shockAngle)
# frontKins = frontData.frontkins[p]
# frontTimeNum = []
# for tn in frontKins.timearr:
# frontTimeNum.append(mdates.date2num(Time.strptime(tn, '%Y-%m-%dT%H:%M:%S.%f').datetime))
# frontTime = np.array(frontTimeNum)
# ftArrTime = mdates.date2num(Time.strptime(frontKins.arrtimeearth, '%Y-%m-%dT%H:%M:%S.%f').datetime)
frontKins = frontData[p]
frontTime = frontKins.timearr
ftArrTime = frontKins.arrtimeearth
#print('ft: ', mdates.num2date(frontTime))
indFront = np.where(abs(frontTime - frameTime) == min(abs(frontTime - frameTime)))[0]
indexFront = 0
if len(indFront) > 0:
indexFront = indFront[0]
# get a measure for how deformed the CME front is
rEllipse = rell
longEllipse = longell
if indexFront != 0:
rell = frontKins.frontarr[:,indexFront]*r_sun/au
longell = np.deg2rad(frontKins.longitude*-1)
apexR = rell[int(len(rell)/2)]
if (apexR > 1.0):
dM = np.array(diffMeasure)
rDiff = np.abs(rell - rEllipse)
if len(dM) == 0:
diffMeasure.append([p, np.mean(rDiff), np.std(rDiff)])
elif p not in dM[:,0]:
diffMeasure.append([p, np.mean(rDiff), np.std(rDiff)])
shockAngle = getShockNormalAngle(pos, longell, rell, timeind, frameTime, ftArrTime, True)
if shockAngle != None:
print('DefFront ShockNormal A: ', shockAngle)
shockAngles.append(shockAngle)
if p == 0:
ax.plot(longell, rell, c='darkred', alpha=1, lw=0.8)
else:
ax.plot(longell, rell, c='red', alpha=0.08, lw=0.4)
if indexFront == 0:
ax.plot(longEllipse, rEllipse, c='red', alpha=0.08, lw=0.4)
if p == 0:
ax.plot(longEllipse, rEllipse, c='forestgreen', alpha=1, lw=0.8)
#if indexFront > 0 and rell[0] == 0:
#stopAnimation = True
#print('stop animation A')
if False: # remove, use next line
#if p == 0:
points, xtest1, ytest1, xtest2, ytest2, angle = getTangentPoint(b, a, xc, yc, stx, sty, -elon, all_apex_s[cmeind[0][p]], 0)
tpR = np.sqrt(float(points[0][0]) ** 2 + float(points[0][1]) ** 2)
tpLon = np.arctan2(float(points[0][1]), float(points[0][0]))
ax.plot([pos.sta[1, timeind], tpLon], [pos.sta[0, timeind], tpR], c='black', linewidth=0.2, alpha = 1)
tangentPoints.append([tpR, tpLon])
#ax.scatter(np.deg2rad(all_apex_lon[cmeind[0][p]]), all_apex_r[cmeind[0][p]], s=0.7, c='green')
for pt in tangentPoints:
tpR = pt[0]
tpLon = pt[1]
ax.scatter(tpLon, tpR, s=0.5, c='red')
if all_apex_s[cmeind[0][p]] == 'B':
# ax.plot(longell,rell, c='royalblue', alpha=1-abs(
# all_apex_lat[cmeind[0][p]]/50), lw=1.5)
frontKins = frontData[p]
frontTime = frontKins.timearr
ftArrTime = frontKins.arrtimeearth
frameTime = frame_time_num + k
#print('ft: ', mdates.num2date(frontTime))
indFront = np.where(abs(frontTime - frameTime) == min(abs(frontTime - frameTime)))[0]
indexFront = 0
if len(indFront) > 0:
indexFront = indFront[0]
#print('FT: ', mdates.num2date(frontTime[indexFront]))
rEllipse = rell
longEllipse = longell
if indexFront != 0:
rEllipse = rell
longEllipse = longell
rell = frontKins.frontarr[:,indexFront]*r_sun/au
longell = np.deg2rad(frontKins.longitude*-1)
apexR = rell[int(len(rell)/2)]
if (apexR > 1.0):
dM = np.array(diffMeasure)
rDiff = np.abs(rell - rEllipse)
if len(dM) == 0:
diffMeasure.append([p, np.mean(rDiff), np.std(rDiff)])
elif p not in dM[:,0]:
diffMeasure.append([p, np.mean(rDiff), np.std(rDiff)])
shockAngle = getShockNormalAngle(pos, longell, rell, timeind, frameTime, ftArrTime, True)
if shockAngle != None:
print('DefFront ShockNormal B: ', shockAngle)
shockAngles.append(shockAngle)
if p == 0:
ax.plot(longell, rell, c='navy', alpha=1, lw=0.8)
else:
ax.plot(longell, rell, c='blue', alpha=0.08, lw=0.4)
if indexFront == 0:
ax.plot(longEllipse, rEllipse, c='blue', alpha=0.08, lw=0.4)
if p == 0:
ax.plot(longEllipse, rEllipse, c='forestgreen', alpha=1, lw=0.8)
#if indexFront > 0 and rell[0] == 0:
#stopAnimation = True
#print('Stop animation B')
#print('index front: ', indexFront)
#print('rell[0]: ', rell[0])
#print('p: ', p)
#print('FrameTime: ', mdates.num2date(frameTime))
if False:
if p == 0:
stx = pos.stb[0, timeind] * np.cos(pos.stb[1, timeind])
sty = pos.stb[0, timeind] * np.sin(pos.stb[1, timeind])
elon = all_apex_lon[cmeind[0][p]]
points, xtest1, ytest1, xtest2, ytest2, angle = getTangentPoint(b, a, xc, yc, stx, sty, -elon, all_apex_s[cmeind[0][p]], 0)
tpR = np.sqrt(float(points[0][0]) ** 2 + float(points[0][1]) ** 2)
tpLon = np.arctan2(float(points[0][1]), float(points[0][0]))
ax.plot([pos.stb[1, timeind], tpLon], [pos.stb[0, timeind], tpR], c='black', linewidth=0.2, alpha = 1)
tangentPoints.append([tpR, tpLon])
#ax.scatter(np.deg2rad(all_apex_lon[cmeind[0][p]]), all_apex_r[cmeind[0][p]], s=0.7, c='green')
for pt in tangentPoints:
tpR = pt[0]
tpLon = pt[1]
ax.scatter(tpLon, tpR, s=0.5, c='blue')
# ############################## plot elongation
# difference of the current frame time (frame_time_num + k) to the
# times of the interpolated elongation track
elondt = frame_time_num + k - et_time_num_interp
# get indices where difference is less than half the time resolution
elonind = np.where(abs(elondt) < dayjump / 2.0)
# print( 'elonind', cmeind)
if np.size(elonind) > 0:
tangent_size = np.arange(0, 1.2, 0.0025)  # radial steps along the tangent, out to 1.2 AU
if all_apex_s[cmeind[0][p]] == 'B':
# for ElEvoHI2 paper Amerstorfer et al. 2017
# ######## add tangent from STEREO-B to ellipseusing the time
# elongation profile
# this is the currently active epsilon for the active CME
angletox = np.deg2rad(180 - et_elon_interp[elonind[0]] - abs(
np.rad2deg(pos.stb[1, timeind]))) # +np.pi/2
# make x y coordinates of tangent vector from 0/0
vecx1 = tangent_size * np.cos(angletox)
vecy1 = tangent_size * np.sin(angletox)
stx = pos.stb[0, timeind] * np.cos(pos.stb[1, timeind])
sty = pos.stb[0, timeind] * np.sin(pos.stb[1, timeind])
elonx1 = stx + vecx1
elony1 = sty + vecy1
if sty > 0:
elony1 = sty - vecy1
elonr = np.sqrt(elonx1 ** 2 + elony1 ** 2)
elonlong = np.arctan2(elony1, elonx1)
if (frame_time_num + k > et_time_num[startcutFit] and
frame_time_num + k < et_time_num[endcutFit]):
ax.plot(
[pos.stb[1, timeind], elonlong[-1]],
[pos.stb[0, timeind], elonr[-1]], c='navy', alpha=1, lw=0.5)
else:
ax.plot(
[pos.stb[1, timeind], elonlong[-1]],
[pos.stb[0, timeind], elonr[-1]], c='navy', alpha=1,
lw=0.5, ls='--')
tangentPoint = get_tangentPoint(longell, rell, elonlong, elonr)
tangentPointCart = get_tangentPointCart(xell, yell, elonx1, elony1)
tpR = np.sqrt(tangentPointCart[0][0] ** 2 + tangentPointCart[0][1] ** 2)
tpLon = np.arctan2(tangentPointCart[0][1], tangentPointCart[0][0])
#ax.scatter(tangentPoint[0][0], tangentPoint[0][1], s=0.5, c='black')
#ax.scatter(tangentPoint[1][0], tangentPoint[1][1], s=0.5, c='blue')
#ax.scatter(tpLon, tpR, s=0.5, c='red')
if all_apex_s[cmeind[0][p]] == 'A':
# Original
angletox = np.deg2rad(90 - et_elon_interp[elonind[0]] - abs(
np.rad2deg(pos.sta[1, timeind])))
stx = pos.sta[0, timeind] * np.cos(pos.sta[1, timeind])
sty = pos.sta[0, timeind] * np.sin(pos.sta[1, timeind])
vecx1 = np.sin(angletox) * tangent_size
vecy1 = np.cos(angletox) * tangent_size
elonx1 = stx - vecx1
elony1 = sty - vecy1
if sty < 0:
elony1 = sty + vecy1
elonr = np.sqrt(elonx1 ** 2 + elony1 ** 2)
elonlong = np.arctan2(elony1, elonx1)
if (frame_time_num + k > et_time_num[startcutFit] and
frame_time_num + k < et_time_num[endcutFit]):
ax.plot(
[pos.sta[1, timeind], elonlong[-1]],
[pos.sta[0, timeind], elonr[-1]], c='darkred',
alpha=1, lw=0.5)
else:
ax.plot(
[pos.sta[1, timeind], elonlong[-1]],
[pos.sta[0, timeind], elonr[-1]], c='darkred', alpha=1,
lw=0.5, ls='--')
if False:
slope = (elony1 - sty) / (elonx1 - stx)
intercept = sty - slope * stx
slope = slope[-1]
intercept = intercept[-1]
return [slope, intercept, tangentPoints, angle, stopAnimation, shockAngles, diffMeasure]
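# A minimal, self-contained sketch of the front-deformation measure used in
# the plotting loop above: the deformed front is compared point-by-point
# against the undeformed ellipse, and the mean and standard deviation of the
# radial differences are stored once per ensemble run. The helper below is
# illustrative only (its name is ours, not part of the original pipeline) and
# assumes both fronts are sampled at the same longitudes.
def _front_deformation_measure(r_deformed, r_ellipse, run_id):
    """Return [run_id, mean, std] of the absolute radial difference."""
    r_diff = np.abs(np.asarray(r_deformed) - np.asarray(r_ellipse))
    return [run_id, np.mean(r_diff), np.std(r_diff)]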
def read_CME_data(read_data, dayjump, current_event_dir, ensemble_results,
d_days, cme_start_date_time, tracksav):
# read all the data needed (CME parameters and elongation profiles)
print('Start reading CME data')
# ############ read file with ensemble results, dump as pickle to use later
if read_data == 1:
print('start transforming front data')
frontFile = current_event_dir + '/results/frontDataAll.sav'
frontData = getcat(frontFile)
# the front kinematics use b' -1' as a sentinel for missing times; convert
# valid entries to matplotlib date numbers and flag the rest as NaN
for i in np.arange(0, len(frontData.frontkins.timearr)):
    for j in np.arange(0, len(frontData.frontkins.timearr[i])):
        if frontData.frontkins.timearr[i][j] != b' -1':
            frontData.frontkins.timearr[i][j] = mdates.date2num(
                Time.strptime(frontData.frontkins.timearr[i][j], '%Y-%m-%dT%H:%M:%S.%f').datetime)
        else:
            frontData.frontkins.timearr[i][j] = float('NaN')
for i in np.arange(0, len(frontData.frontkins.arrtimeearth)):
    if frontData.frontkins.arrtimeearth[i] != b' -1':
        frontData.frontkins.arrtimeearth[i] = mdates.date2num(
            Time.strptime(frontData.frontkins.arrtimeearth[i], '%Y-%m-%dT%H:%M:%S.%f').datetime)
    else:
        frontData.frontkins.arrtimeearth[i] = float('NaN')
frontDataKins = frontData.frontkins
h = getcat(current_event_dir + ensemble_results)
all_apex_t = h.elevo_kin.all_apex_t[0]
startcutFit = int(h.startcut)
endcutFit = int(h.endcut)
[all_apex_t_num_non_interp,
all_apex_t_num_non_interp_str] = time_to_num_cat(all_apex_t)
# get all parameters
all_apex_r_non_interp = h.elevo_kin.all_apex_r[0]
all_apex_lat_non_interp = h.elevo_kin.all_apex_lat[0] # degree
all_apex_lon_non_interp = h.elevo_kin.all_apex_lon[0] # degree
# ellipse aspect ratio f
all_apex_f_non_interp = h.elevo_kin.all_apex_f[0]
# width
all_apex_w_non_interp = np.deg2rad(h.elevo_kin.all_apex_w[0])
# quantities that are constant within each run
all_apex_s_non_interp = decode_array(h.elevo_kin.all_apex_s[0])
all_apex_run_non_interp = h.elevo_kin.runnumber[0]
all_apex_flag_non_interp = h.elevo_kin.colorflag[0]
if d_days == 0:
time_gt1AU = all_apex_t_num_non_interp[np.where(
all_apex_r_non_interp > 2.0)][0]
d_days = time_gt1AU + 1.0 - all_apex_t_num_non_interp[0]
d_days = np.round(d_days / dayjump) * dayjump
dur_days = d_days
if cme_start_date_time == '':
CME_start_time = mdates.date2num(roundTime(
Time.strptime(all_apex_t_num_non_interp_str[0], '%Y-%m-%dT%H:%M:%S').datetime,
roundTo=60 * 60))
# define cme frame times
h_time_num = np.arange(
CME_start_time, CME_start_time + d_days, dayjump)
# go through each run and interpolate data for each run
# final array size -> time array of CME frames * run numbers
finarrs = np.size(h_time_num) * np.size(
np.unique(all_apex_run_non_interp))
eventsize = np.size(h_time_num)
# initialise arrays
all_apex_t = np.zeros(finarrs)
all_apex_r = np.zeros(finarrs)
all_apex_lat = np.zeros(finarrs)
all_apex_lon = np.zeros(finarrs)
all_apex_f = np.zeros(finarrs)
all_apex_w = np.zeros(finarrs)
all_apex_s = [''] * finarrs
all_apex_run = np.zeros(finarrs)
all_apex_flag = np.zeros(finarrs)
print('start interpolation')
# for q in np.arange(0, np.max(all_apex_run_non_interp)):
for q in np.arange(0, np.size(np.unique(all_apex_run_non_interp))):
# print(q)
# get indices of kinematic data for this run
thisrunind = np.where(all_apex_run_non_interp == np.unique(
all_apex_run_non_interp)[q])
# if there is data available for this run, interpolate to CME times
if np.size(thisrunind) > 0:
# these variables change with time
# this is time, fill with frame times
all_apex_t[eventsize * q:eventsize * (q + 1)] = h_time_num
# fill with interpolation variables
all_apex_r[eventsize * q:eventsize * (q + 1)] = np.interp(
h_time_num, all_apex_t_num_non_interp[thisrunind],
all_apex_r_non_interp[thisrunind])
all_apex_lon[eventsize * q:eventsize * (q + 1)] = np.interp(
h_time_num, all_apex_t_num_non_interp[thisrunind],
all_apex_lon_non_interp[thisrunind])
all_apex_lat[eventsize * q:eventsize * (q + 1)] = np.interp(
h_time_num, all_apex_t_num_non_interp[thisrunind],
all_apex_lat_non_interp[thisrunind])
all_apex_f[eventsize * q:eventsize * (q + 1)] = np.interp(
h_time_num, all_apex_t_num_non_interp[thisrunind],
all_apex_f_non_interp[thisrunind])
all_apex_w[eventsize * q:eventsize * (q + 1)] = np.interp(
h_time_num, all_apex_t_num_non_interp[thisrunind],
all_apex_w_non_interp[thisrunind])
# fill with run numbers
all_apex_run[eventsize * q:eventsize * (
q + 1)] = all_apex_run_non_interp[thisrunind][0:eventsize]
# fill with flag numbers
all_apex_flag[eventsize * q:eventsize * (
q + 1)] = all_apex_flag_non_interp[thisrunind][0:eventsize]
# fill with observatory string
all_apex_s[eventsize * q:eventsize * (
q + 1)] = all_apex_s_non_interp[thisrunind][0:eventsize]
else: # set all to np.nan for this run
all_apex_t[eventsize * q:eventsize * (q + 1)] = np.nan
all_apex_r[eventsize * q:eventsize * (q + 1)] = np.nan
all_apex_lon[eventsize * q:eventsize * (q + 1)] = np.nan
all_apex_lat[eventsize * q:eventsize * (q + 1)] = np.nan
all_apex_f[eventsize * q:eventsize * (q + 1)] = np.nan
all_apex_w[eventsize * q:eventsize * (q + 1)] = np.nan
all_apex_run[eventsize * q:eventsize * (q + 1)] = np.nan
all_apex_s[eventsize * q:eventsize * (q + 1)] = ''
all_apex_flag[eventsize * q:eventsize * (q + 1)] = np.nan
print('end interpolation')
pickle.dump((all_apex_t, all_apex_r, all_apex_lat, all_apex_lon,
all_apex_f, all_apex_w, all_apex_s, all_apex_run,
all_apex_flag, CME_start_time, dur_days, startcutFit,
endcutFit, frontDataKins), open(current_event_dir + "all_apex_variables.p", "wb"))
if read_data == 0:
[all_apex_t, all_apex_r, all_apex_lat, all_apex_lon, all_apex_f,
all_apex_w, all_apex_s, all_apex_run, all_apex_flag,
CME_start_time, dur_days, startcutFit, endcutFit, frontDataKins] = pickle.load(
open(current_event_dir + 'all_apex_variables.p', "rb"))
if d_days == 0:
d_days = dur_days
if cme_start_date_time == '':
# CME_start_time = all_apex_t[np.isfinite(all_apex_t)][0]
# define cme frame times
h_time_num = np.arange(
CME_start_time, CME_start_time + d_days, dayjump)
# define times
if cme_start_date_time != '':
CME_start_time = mdates.date2num(Time.strptime(
cme_start_date_time, '%Y-%b-%d %H:%M:%S').datetime)
# define cme frame times
h_time_num = np.arange(
CME_start_time, CME_start_time + d_days, dayjump)
# h_time_str=mdates.num2date(h_time_num)
# ######## read and interpolate e-t profile to movie frame times - used for
# making the line from spacecraft to front
# (relevant arrays: et_time_num, h_time_num, et_elon, h_et_elon)
# get elongation-time profile from track
et = getcat(current_event_dir + tracksav)
et_time = et.track.track_date[0]
et_time_num = time_to_num_cat(et_time)[0]
et_elon = et.track['elon'][0]
# TODO: automate the choice of interpolation step
# linearly interpolate to hourly values; make this automatic later
# et_start_time=mdates.date2num(sunpy.time.parse_time(cme_start_date_time))
et_start_time = CME_start_time # +dayjump
# et_time_num_interp=np.arange(et_start_time,et_start_time+duration_days,dayjump)
et_time_num_interp = np.arange(et_start_time, max(
et_time_num), dayjump)
et_elon_interp = np.interp(et_time_num_interp, et_time_num, et_elon)
print('Finished reading CME data')
return [CME_start_time, d_days, startcutFit, endcutFit, all_apex_t,
all_apex_r, all_apex_lat, all_apex_lon, all_apex_f, all_apex_w,
all_apex_s, all_apex_run, all_apex_flag, et_time_num,
et_time_num_interp, et_elon_interp, frontDataKins]
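# Hypothetical call of read_CME_data (directory and file names below are
# placeholders, not taken from any original event configuration):
# read_data=1 parses the IDL .sav files and pickles the result, while
# read_data=0 reloads the pickle on subsequent runs.
#
# cme_data = read_CME_data(read_data=1, dayjump=1.0 / 24.0,
#                          current_event_dir='events/my_event',
#                          ensemble_results='/results/ensemble_results.sav',
#                          d_days=0, cme_start_date_time='',
#                          tracksav='/track.sav')
# [CME_start_time, d_days, startcutFit, endcutFit] = cme_data[0:4]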
def get_tangentPointCart(ell_x, ell_y, tangent_x, tangent_y):
minDist = 10000
indEll = -1
indTang = -1
for i in np.arange(0, np.size(ell_x)):
for j in np.arange(0, np.size(tangent_x)):
dist = (ell_x[i] - tangent_x[j]) ** 2 + (ell_y[i] - tangent_y[j]) ** 2
if dist < minDist:
minDist = dist
indEll = i
indTang = j
#print(minDist)
#print(ell_elon[indEll], ell_r[indEll])
#print(tangent_elon[indTang], tangent_r[indTang])
return [[ell_x[indEll], ell_y[indEll]], [tangent_x[indTang], tangent_y[indTang]], indEll, indTang]
def get_tangentPoint(ell_elon, ell_r, tangent_elon, tangent_r):
minDist = 100
indEll = -1
indTang = -1
for i in np.arange(0, np.size(ell_elon)):
    for j in np.arange(0, np.size(tangent_elon)):
        dist = (ell_elon[i] - tangent_elon[j]) ** 2 + (ell_r[i] - tangent_r[j]) ** 2
        if dist < minDist:
            minDist = dist
            indEll = i
            indTang = j
return [[ell_elon[indEll], ell_r[indEll]], [tangent_elon[indTang], tangent_r[indTang]], indEll, indTang]
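# Quick self-contained check of the closest-approach search above, using
# synthetic curves rather than mission data: for a circular "front" and a
# radial ray, the nearest pair should lie where the ray crosses the circle.
# (Helper name and test values are ours, for illustration only.)
def _demo_get_tangentPoint():
    ell_elon = np.linspace(-0.5, 0.5, 101)   # front longitudes in rad
    ell_r = np.full_like(ell_elon, 0.8)      # circular front at 0.8 AU
    tan_elon = np.zeros(50)                  # radial ray along 0 rad
    tan_r = np.linspace(0.0, 1.2, 50)
    pair = get_tangentPoint(ell_elon, ell_r, tan_elon, tan_r)
    print('closest front point:', pair[0], 'closest ray point:', pair[1])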
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
    """Request a NetCDF download from the OOI M2M API for the given data
    stream and time range, then poll until the asynchronous request has been
    processed. Returns the JSON response with the result URLs, or None if
    the initial request failed."""
    options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
    r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
    if r.status_code == requests.codes.ok:
        data = r.json()
    else:
        return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
elapsed = 0  # minutes waited; initialized so the message below is defined even if the first poll succeeds
with tqdm(total=400, desc='Waiting') as bar:
    for i in range(400):
        r = requests.get(check_complete)
        bar.update(1)
        if r.status_code == requests.codes.ok:
            bar.n = 400
            bar.last_print_n = 400
            bar.refresh()
            print('\nrequest completed in %f minutes.' % elapsed)
            break
        else:
            time.sleep(3)
            elapsed = (i * 3) / 60
return data
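# Hypothetical request against one of the streams defined in M2M_URLs below
# (the dates are placeholders; a valid OOI API key must be set in AUTH):
#
# data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                 '2019-01-01T00:00:00.000Z', '2019-01-31T23:59:59.999Z')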
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to find the data files in the THREDDS catalog.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: list of THREDDS catalog references for the NetCDF files that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist, variables):
    thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist is going to contain more than one url eventually
    for jj in range(len(nclist)):
        url = nclist[jj]
        # drop the leading catalog prefix (first 25 characters) so the
        # remainder can be appended to the OPeNDAP (DODS) base URL
        url = url[25:]
        dap_url = thredds + url + '#fillmismatch'
        openFile = Dataset(dap_url, 'r')
        # append each requested variable's values across all files
        for ii in range(len(variables)):
            dum = openFile.variables[variables[ii].name]
            variables[ii].data = np.append(variables[ii].data, dum[:].data)
    # convert the time axis from seconds since 1900-01-01 to days, then to datetimes
    tmp = variables[0].data / 60 / 60 / 24
    time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
    return variables, time_converted
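# A minimal end-to-end sketch of how the three helpers above are meant to
# compose (illustrative only; it assumes network access, a valid API key in
# AUTH, and that the requested stream exists): request the data, list the
# resulting NetCDF files, then aggregate the variables and convert the time
# axis.
def _example_download(uframe_dataset_name, start_date, end_date, variables):
    data = M2M_Call(uframe_dataset_name, start_date, end_date)
    if data is None:
        return None, None
    files = M2M_Files(data, r'.*\.nc$')  # keep only the NetCDF entries
    return M2M_Data(files, variables)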
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
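# Small demonstration of the two container classes (not part of the data
# access path): indexing a structtype at its current length transparently
# appends a fresh var, which is how M2M_URLs below builds its variable lists.
def _demo_structtype():
    vl = structtype()
    vl[0].name = 'time'
    vl[0].units = 'seconds since 1900-01-01'
    vl[1].name = 'sea_surface_temperature'
    print(len(vl))   # -> 2
    print(vl[0])     # repr shows name, units, and the (0,) data shape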
def M2M_URLs(platform_name, node, instrument_class, method):
    """Look up the uFrame dataset name for a given platform/node/instrument/
    method combination and pre-allocate the matching variable list (variable
    names, empty data arrays, and units) for that data stream."""
    var_list = structtype()
    #MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
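# VELPT: single-point velocity meters. Every VELPT entry below shares the
# same nine-variable layout: time, east/north/up velocity (m/s), attitude
# (heading/roll/pitch in deci-degrees), temperature (0.01degC), and
# pressure (0.001dbar).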
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
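# PCO2W: water-side pCO2 sensors. Each entry maps three variables: time,
# the instrument thermistor temperature (degC), and seawater pCO2 (uatm).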
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
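# PHSEN: seawater pH sensors. Each entry maps three variables: time,
# thermistor temperature (degC), and pH of seawater (reported unitless).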
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
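# SPKIR: downwelling spectral irradiance. The single data variable is a
# multichannel vector (uW cm-2 nm-1), one value per spectral channel.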
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
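# PRESF: seafloor pressure (tide) recorders on the MFN frames. Variables:
# time, absolute seafloor pressure (dbar), and seawater temperature (degC).
# The reference designator suffix (PRESFA/B/C000) varies by site.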
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
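# CTDBP: pumped CTDs. All CTDBP entries share six variables: time,
# temperature (degC), practical salinity (unitless), density (kg/m3),
# pressure (dbar), and conductivity (S/m). The reference designator varies
# by site (e.g. CTDBPE000 on the CE09OSSM MFN, CTDBPC000 elsewhere).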
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
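# VEL3D: 3-D turbulent velocity meters on the MFN frames. Variables: time,
# east/north/up turbulent velocity (m/s), and seawater pressure reported in
# mbar (hence the '0.001dbar' unit string).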
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
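# VEL3DK: the profiler-mounted VEL3D on CE09OSPM. It adds heading, pitch,
# roll, and the pressure from the co-located CTD; the matching profiler CTD
# (CTDPFK) entry follows directly below.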
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
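# PCO2A: air-sea pCO2 systems on the surface buoys. Variables: time,
# seawater pCO2 (uatm), atmospheric pCO2 (uatm), and the derived air-sea
# CO2 flux (mol m-2 s-1).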
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
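# PARAD: photosynthetically active radiation on the CE09OSPM profiler.
# The double underscore in 'parad_k__stc_imodem_instrument' is deliberate;
# it matches the stream name as catalogued in uFrame.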
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
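# OPTAA: optical absorption/attenuation meters. Only the time coordinate is
# mapped here; the spectral channels are not requested.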
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
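# NUTNR: SUNA nitrate sensors. Variables: time, nitrate concentration, and
# salinity-corrected nitrate (both umol/L). The telemetered stream is
# catalogued as 'suna_dcl_recovered' in uFrame, so the 'recovered' suffix
# is expected despite the Telemetered method.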
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
#MOPAK
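# MOPAK: buoy-mounted 3-axis motion packages. From here on the entries use
# the RecoveredHost method; only the time coordinate is mapped for MOPAK.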
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
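# METBK1: bulk meteorology packages on the surface buoys. Each entry maps
# nineteen variables: sea-surface temperature/conductivity/salinity,
# magnetically corrected wind components, barometric pressure, air
# temperature, relative humidity, long- and shortwave irradiance,
# precipitation, the minute-averaged heat-flux products, eastward/northward
# velocity, and specific humidity.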
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
        var_list[4].data = np.array([])
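# Illustrative sketch (not part of the original script): one way the
# name/data/units entries above could be populated from a downloaded netCDF
# file. The helper name 'fetch_variable_data' and the use of the netCDF4
# package are assumptions, not the script's actual download path.
def fetch_variable_data(nc_path, var_list):
    from netCDF4 import Dataset  # assumes the netCDF4 package is available
    ds = Dataset(nc_path)
    for v in var_list:
        if v.name in ds.variables:
            # pull the full variable array into the matching var_list slot
            v.data = np.asarray(ds.variables[v.name][:])
    ds.close()
    return var_list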
from collections import defaultdict
import json
import re
import sys
import time
import matplotlib.pyplot as plt
from itertools import permutations
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial.distance import pdist
from scipy.stats import lognorm
import seaborn as sns
from sklearn.cluster import DBSCAN
import statsmodels.nonparametric.api as smnp
#############################################################################
### Parameters
### Theoretical scale markers
### PYT = Pythagorean tuning
### EQ{N} = N-Tone Equal Temperament
### JI = Just intonation
### CHINA = Shi-er-lu
### The rest are sourced from Rechberger, Herman
PYT_INTS = np.array([0., 90.2, 203.9, 294.1, 407.8, 498.1, 611.7, 702., 792.2, 905., 996.1, 1109.8, 1200.])
EQ5_INTS = np.linspace(0, 1200, num=6, endpoint=True, dtype=float)
EQ7_INTS = np.linspace(0, 1200, num=8, endpoint=True, dtype=float)
EQ9_INTS = np.linspace(0, 1200, num=10, endpoint=True, dtype=float)
EQ10_INTS = np.linspace(0, 1200, num=11, endpoint=True, dtype=float)
EQ12_INTS = np.linspace(0, 1200, num=13, endpoint=True, dtype=float)
EQ24_INTS = np.linspace(0, 1200, num=25, endpoint=True, dtype=float)
EQ53_INTS = np.linspace(0, 1200, num=54, endpoint=True, dtype=float)
JI_INTS = np.array([0., 111.7, 203.9, 315.6, 386.3, 498.1, 590.2, 702., 813.7, 884.4, 1017.6, 1088.3, 1200.])
SLENDRO = np.array([263., 223., 253., 236., 225.])
PELOG = np.array([167., 245., 125., 146., 252., 165., 100.])
DASTGAH = np.array([0., 90., 133.23, 204., 294.14, 337.14, 407.82, 498., 568.72, 631.28, 702., 792.18, 835.2, 906., 996., 1039.1, 1109.77, 1200.])
TURKISH = {'T':203.8, 'K':181.1, 'S':113.2, 'B':90.6, 'F':22.6, 'A':271, 'E':67.9}
KHMER_1 = np.array([185., 195., 105., 195., 195., 185., 140.])
KHMER_2 = np.array([190., 190., 130., 190., 190., 190., 120.])
VIET = np.array([0., 175., 200., 300., 338., 375., 500., 520., 700., 869., 900., 1000., 1020., 1200.])
CHINA = np.array([0., 113.67291609, 203.91000173, 317.73848174, 407.83554758, 520.68758457, 611.71791523, 701.95500087, 815.62791696, 905.8650026 , 1019.47514332, 1109.76982292, 1201.27828039])
### Maximum allowable deviation from a perfect octave
### i.e., scale is included if the intervals sum to 1200 +- OCT_CUT
OCT_CUT = 50
#############################################################################
### Functions to be used in reformatting the data
def get_cents_from_ratio(ratio):
return 1200.*np.log10(ratio)/np.log10(2)
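# Quick sanity check (illustrative, not in the original module): an octave
# (ratio 2) is 1200 cents and a just perfect fifth (ratio 3/2) is ~702 cents.
def demo_cents_from_ratio():
    return get_cents_from_ratio(2.0), get_cents_from_ratio(1.5)  # (1200.0, ~701.96)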
def str_to_ints(st, delim=';'):
return [int(s) for s in st.split(delim) if len(s)]
def ints_to_str(i):
return ';'.join([str(x) for x in i])
def get_all_ints(df, old='pair_ints', new='all_ints2'):
def fn(pi):
ints = np.array(str_to_ints(pi))
return ints_to_str([x for i in range(len(ints)) for x in np.cumsum(np.roll(ints,i))[:-1]])
df[new] = df[old].apply(fn)
return df
#############################################################################
### Clustering the scales by the distance between interval sets
def find_min_pair_int_dist(b, c):
dist = 0.0
for i in range(len(b)):
dist += np.min(np.abs(c-b[i]))
return dist
def pair_int_distance(pair_ints):
pair_dist = np.zeros((len(pair_ints), len(pair_ints)), dtype=float)
for i in range(len(pair_ints)):
for j in range(len(pair_ints)):
dist1 = find_min_pair_int_dist(pair_ints[i], pair_ints[j])
dist2 = find_min_pair_int_dist(pair_ints[j], pair_ints[i])
pair_dist[i,j] = (dist1 + dist2) * 0.5
return pair_dist
def cluster_pair_ints(df, n_clusters):
pair_ints = np.array([np.array([float(x) for x in y.split(';')]) for y in df.pair_ints])
pair_dist = pair_int_distance(pair_ints)
li = linkage(pdist(pair_dist), 'ward')
return fcluster(li, li[-n_clusters,2], criterion='distance')
def label_scales_by_cluster(df, n=16):
nc = cluster_pair_ints(df, n)
df[f"cl_{n:02d}"] = nc
return df
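# Usage sketch with made-up data (not in the original module): cluster three
# 7-interval scales into two groups.
def demo_cluster_pair_ints():
    df = pd.DataFrame({'pair_ints': ['200;200;100;200;200;200;100',    # major
                                     '100;200;200;200;100;200;200',    # phrygian
                                     '171;171;172;171;171;172;172']})  # near-equidistant
    return cluster_pair_ints(df, n_clusters=2)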
#############################################################################
### Functions for extracting and reformatting the raw data
### Encode a scale as a binary string:
### If the first character is 0, then the first potential note in the scale is
### not played. If it is 1, then it is played.
### E.g., the major scale in 12-TET is given by 101011010101
### The intervals are then retrieved by comparing the mask with the correct tuning system
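# Illustrative decoding of the mask (not in the original module): applying the
# major-scale mask to 12-TET; the appended '1' closes the octave, matching the
# 13-character masks built by reformat_scales_as_mask below.
def demo_mask_to_scale(mask='101011010101'):
    idx = np.where(np.array([int(c) for c in mask + '1']))[0]
    return EQ12_INTS[idx]  # [0, 200, 400, 500, 700, 900, 1100, 1200]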
def reformat_scales_as_mask(df):
df['Intervals'] = df['Intervals'].astype(str)
st = '000000000000001'
fn = lambda x: '1' + ''.join([st[-int(i):] for i in x])
idx = df.loc[df.Tuning.apply(lambda x: x not in ['Unique', 'Turkish', '53-tet'])].index
df.loc[idx, 'mask'] = df.loc[idx, 'Intervals'].apply(fn)
fn = lambda x: '1' + ''.join([st[-int(i):] for i in x.split(';')])
idx = df.loc[df.Tuning=='53-tet'].index
df.loc[idx, 'mask'] = df.loc[idx, 'Intervals'].apply(fn)
return df
def reformat_surjodiningrat(df):
for row in df.itertuples():
ints = [get_cents_from_ratio(float(row[i+3])/float(row[i+2])) for i in range(7) if row[i+3] != 0]
df.loc[row[0], 'pair_ints'] = ';'.join([str(int(round(x))) for x in ints])
df['Reference'] = 'Surjodiningrat'
df['Theory'] = 'N'
df = df.drop(columns=[str(x) for x in range(1,9)])
return df
def reformat_original_csv_data(df):
new_df = pd.DataFrame(columns=['Name', 'Intervals', 'Culture', 'Region', 'Country', 'Tuning', 'Reference', 'RefID', 'Theory'])
for i, col in enumerate(df.columns):
tuning = df.loc[0, col]
culture = df.loc[1, col]
cont = df.loc[2, col]
country = df.loc[3, col]
ref = df.loc[4, col]
refid = df.loc[5, col]
theory = df.loc[6, col]
try:
int(col)
name = '_'.join([culture, col])
except:
name = col
ints = ';'.join([str(int(round(float(x)))) for x in df.loc[7:, col] if not str(x)=='nan'])
new_df.loc[i] = [name, ints, culture, cont, country, tuning, ref, refid, theory]
return new_df
def update_scale_data(data_dict, scale, name, country, culture, tuning, cont, ref, refID, theory):
data_dict['Name'].append(name)
data_dict['scale'].append(scale)
data_dict['all_ints'].append([scale[i] - scale[j] for j in range(len(scale)) for i in range(j+1,len(scale))])
data_dict['pair_ints'].append([scale[j+1] - scale[j] for j in range(len(scale)-1)])
data_dict['Tuning'].append(tuning)
data_dict['Country'].append(country)
data_dict['Culture'].append(culture)
data_dict['Region'].append(cont)
data_dict['Reference'].append(ref)
data_dict['RefID'].append(refID)
data_dict['Theory'].append(theory)
return data_dict
def scale_matching_fn(row):
# Only some tuning systems use 'mask'
try:
idx = np.where(np.array([int(x) for x in row.mask]))[0]
except TypeError:
pass
for tun in row.Tuning.split(';'):
if tun == '12-tet':
yield EQ12_INTS[idx]
elif tun == '53-tet':
yield EQ53_INTS[idx]
elif tun == 'Just':
yield JI_INTS[idx]
elif tun == 'Pythagorean':
yield PYT_INTS[idx]
elif tun == 'Arabic':
yield EQ24_INTS[idx]
elif tun == 'Dastgah-ha':
yield DASTGAH[idx]
elif tun == 'Vietnamese':
yield VIET[idx]
elif tun == 'Chinese':
yield CHINA[idx]
elif tun == 'Turkish':
yield np.cumsum([0.0] + [TURKISH[a] for a in row.Intervals])
elif tun == 'Khmer':
for KHM in [KHMER_1, KHMER_2]:
base = KHM[[i-1 for i in idx[1:]]]
for i in range(len(base)):
                    # list() is needed here: [0.] + ndarray would broadcast-add, not concatenate
                    yield np.cumsum([0.] + list(np.roll(KHM, i)))
def process_scale(scale):
scale = scale.astype(int)
adj_ints = np.diff(scale).astype(int)
N = len(adj_ints)
all_ints1 = np.array([i for j in range(len(scale)-1) for i in np.cumsum(adj_ints[j:])])
all_ints2 = np.array([i for j in range(len(scale)) for i in np.cumsum(np.roll(adj_ints, j))])
return adj_ints, N, scale, all_ints1, all_ints2
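# Example (illustrative, not in the original module): decompose a 12-TET major
# scale into adjacent and compound intervals.
def demo_process_scale():
    major = np.array([0, 200, 400, 500, 700, 900, 1100, 1200])
    adj_ints, N, scale, all_ints1, all_ints2 = process_scale(major)
    return adj_ints, N  # ([200, 200, 100, 200, 200, 200, 100], 7)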
def match_scales_to_tunings(df):
df = reformat_scales_as_mask(df.copy())
cols = list(df.columns[:-1])
cols[2:2] = ['n_notes', 'scale', 'all_ints1', 'all_ints2']
new_df = pd.DataFrame(columns=cols)
for row in df.itertuples():
for scale in scale_matching_fn(row):
adj_ints, N, scale, all_ints1, all_ints2 = process_scale(scale)
vals = list(row)[1:-1]
vals[1] = adj_ints
vals[2:2] = [N, scale, all_ints1, all_ints2]
new_df.loc[len(new_df)] = vals
return new_df
def extract_scale_using_tonic(ints, tonic, oct_cut):
# If in str or list format, there are explicit instructions
# for each interval
# Otherwise, there is simply a starting note, and it should
# not go beyond a single octave
if isinstance(tonic, str):
tonic = np.array(str_to_ints(tonic))
tmin, tmax = min(tonic), max(tonic)
elif isinstance(tonic, (list, np.ndarray)):
tmin, tmax = min(tonic), max(tonic)
elif isinstance(tonic, (int, float)):
i_tonic = int(tonic) - 1
tonic = np.zeros(len(ints)+1)
tonic[i_tonic] = 1
tonic[-1] = 2
tmin, tmax = 1, 2
scale = []
for i, t1, t2 in zip(ints, tonic[:-1], tonic[1:]):
if t1 == tmin:
if len(scale):
yield np.array(scale)
scale = [0, i]
elif len(scale):
scale.append(i + scale[-1])
if scale[-1] > (1200 - OCT_CUT):
yield np.array(scale)
def extract_specific_modes(ints, tonic, modes):
if isinstance(tonic, str):
tonic = np.array(str_to_ints(tonic), int)
for m in modes.split(','):
m = str_to_ints(m)
extra = 0
scale = []
for i, t in zip(ints, tonic[:-1]):
if t == m[0]:
if len(scale):
if scale[-1] > (1200 - OCT_CUT):
yield np.array(scale)
scale = [0, i]
elif len(scale) and t in m:
scale.append(scale[-1] + i)
elif len(scale):
scale[-1] = scale[-1] + i
if scale[-1] > (1200 - OCT_CUT):
yield np.array(scale)
def eval_tonic(tonic):
if isinstance(tonic, str):
return tonic != 'N/A'
elif isinstance(tonic, (int, float)):
return not np.isnan(tonic)
def extract_scale(row, oct_cut=OCT_CUT, use_mode=False):
ints = np.array(row.Intervals)
# This column exists only for this instruction;
# If 'Y', then add the final interval needed for the scale
# to add up to an octave;
# See paper and excel file for more details
if row.Octave_modified == 'Y':
final_int = 1200 - sum(ints)
yield np.array([0.] + list(np.cumsum(list(ints) + [final_int])))
return
    # Try mode-specific extraction first; entries without a 'Modes'
    # attribute fall through to the tonic/octave-based logic below.
if not use_mode:
try:
for scale in extract_specific_modes(ints, row.Tonic, row.Modes):
yield scale
return
except AttributeError:
pass
# If the entry includes information on tonality, and if
# not using modes, follow the instructions given
if not use_mode:
if eval_tonic(row.Tonic):
for scale in extract_scale_using_tonic(ints, row.Tonic, oct_cut):
if abs(1200 - scale[-1]) <= oct_cut:
yield scale
return
if sum(ints) >= (1200 - oct_cut):
start_from = 0
for i in range(len(ints)):
if i < start_from:
continue
sum_ints = np.cumsum(ints[i:], dtype=int)
# If the total sum of ints is less than the cutoff, ignore this entry
if sum_ints[-1] < (1200 - OCT_CUT):
break
# Find the scale degree by finding the note closest to 1200
idx_oct = np.argmin(np.abs(sum_ints-1200))
oct_val = sum_ints[idx_oct]
# If the total sum of ints is greater than the cutoff, move
# on to the next potential scale
if abs(oct_val - 1200) > OCT_CUT:
continue
# If modes are not being used (i.e., if each interval is only
# allowed to be counted in a scale once) then start looking
# for new scales from this index
if not use_mode:
start_from = idx_oct + i + 1
yield np.array([0.] + list(sum_ints[:idx_oct+1]))
def extract_scales_from_measurements(df, oct_cut=OCT_CUT, use_mode=False):
if isinstance(df.loc[0, 'Intervals'], str):
df.Intervals = df.Intervals.apply(str_to_ints)
cols = list(df.columns)
cols[2:2] = ['n_notes', 'scale', 'all_ints1', 'all_ints2']
new_df = pd.DataFrame(columns=cols)
for row in df.itertuples():
for scale in extract_scale(row, oct_cut, use_mode):
adj_ints, N, scale, all_ints1, all_ints2 = process_scale(scale)
vals = list(row)[1:]
vals[1] = adj_ints
vals[2:2] = [N, scale, all_ints1, all_ints2]
new_df.loc[len(new_df)] = vals
return new_df
def distribution_statistics(X, xhi=0, N=1000):
X = X[np.isfinite(X)]
if xhi:
bins = np.linspace(0, xhi, N)
else:
bins = np.linspace(0, np.max(X), N)
hist = np.histogram(X, bins=bins)[0]
bin_mid = bins[:-1] + 0.5 * np.diff(bins[:2])
mode = bin_mid[np.argmax(hist)]
median = np.median(X)
    mean = np.mean(X)
# Author: <NAME>
"""
Script for training a model to predict properties using a Black Box alpha-divergence
minimisation Bayesian neural network.
"""
import argparse
import sys
from matplotlib import pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from BNN.bb_alpha import BB_alpha
from BNN.bnn_utils import load_reg_data
from data_utils import transform_data, TaskDataLoader, featurise_mols
def main(path, task, representation, use_pca, n_trials, test_set_size, use_rmse_conf, precompute_repr):
"""
:param path: str specifying path to dataset.
:param task: str specifying the task. One of ['Photoswitch', 'ESOL', 'FreeSolv', 'Lipophilicity']
    :param representation: str specifying the molecular representation. One of ['SMILES', 'fingerprints', 'fragments', 'fragprints']
:param use_pca: bool. If True apply PCA to perform Principal Components Regression.
:param n_trials: int specifying number of random train/test splits to use
:param test_set_size: float in range [0, 1] specifying fraction of dataset to use as test set
:param use_rmse_conf: bool specifying whether to compute the rmse confidence-error curves or the mae confidence-
error curves. True is the option for rmse.
:param precompute_repr: bool indicating whether to precompute representations or not.
"""
data_loader = TaskDataLoader(task, path)
smiles_list, y = data_loader.load_property_data()
X = featurise_mols(smiles_list, representation)
if precompute_repr:
if representation == 'SMILES':
with open(f'precomputed_representations/{task}_{representation}.txt', 'w') as f:
for smiles in X:
f.write(smiles + '\n')
else:
np.savetxt(f'precomputed_representations/{task}_{representation}.txt', X)
# If True we perform Principal Components Regression
if use_pca:
n_components = 100
else:
n_components = None
r2_list = []
rmse_list = []
mae_list = []
# We pre-allocate arrays for plotting confidence-error curves
_, _, _, y_test = train_test_split(X, y, test_size=test_set_size, random_state=42) # To get test set size
# Photoswitch dataset requires 80/20 splitting. Other datasets are 80/10/10.
if task != 'Photoswitch':
split_in_two = int(len(y_test)/2)
n_test = split_in_two
else:
n_test = len(y_test)
rmse_confidence_list = np.zeros((n_trials, n_test))
mae_confidence_list = np.zeros((n_trials, n_test))
# For Calibration curve
prediction_prop = [[] for _ in range(n_trials)]
print('\nBeginning training loop...')
for i in range(0, n_trials):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_set_size, random_state=i)
if representation == 'SMILES':
np.savetxt(f'fixed_train_test_splits/{task}/X_train_split_{i}.txt', X_train, fmt="%s")
np.savetxt(f'fixed_train_test_splits/{task}/X_test_split_{i}.txt', X_test, fmt="%s")
np.savetxt(f'fixed_train_test_splits/{task}/y_train_split_{i}.txt', y_train)
np.savetxt(f'fixed_train_test_splits/{task}/y_test_split_{i}.txt', y_test)
else:
if task != 'Photoswitch':
                # Artificially create an 80/10/10 train/validation/test split, discarding the validation set.
split_in_two = int(len(y_test)/2)
X_test = X_test[0:split_in_two]
y_test = y_test[0:split_in_two]
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
# We standardise the outputs but leave the inputs unchanged
_, y_train, _, y_test, y_scaler = transform_data(X_train, y_train, X_test, y_test, n_components=n_components, use_pca=use_pca)
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
np.random.seed(42)
datasets, n, d, mean_y_train, std_y_train = load_reg_data(X_train, y_train, X_test, y_test)
train_set_x, train_set_y = datasets[0]
test_set_x, test_set_y = datasets[1]
N_train = train_set_x.get_value(borrow=True).shape[0]
N_test = test_set_x.get_value(borrow=True).shape[0]
layer_sizes = [d, 20, 20, len(mean_y_train)]
n_samples = 100
alpha = 0.5
learning_rate = 0.01
v_prior = 1.0
batch_size = 32
print('... building model')
sys.stdout.flush()
bb_alpha = BB_alpha(layer_sizes, n_samples, alpha, learning_rate, v_prior, batch_size,
train_set_x, train_set_y, N_train, test_set_x, test_set_y, N_test, mean_y_train, std_y_train)
print('... training')
sys.stdout.flush()
test_error, test_ll = bb_alpha.train_ADAM(100)
print('Test RMSE: ', test_error)
print('Test ll: ', test_ll)
samples = bb_alpha.sample_predictive_distribution(X_test)
y_pred = np.mean(samples, axis=0)
var = np.var(samples, axis=0)
# For producing the calibration curve
for k in [0.13, 0.26, 0.39, 0.53, 0.68, 0.85, 1.04, 1.15, 1.28, 1.44, 1.645, 1.96]:
a = (y_scaler.inverse_transform(y_test) < y_scaler.inverse_transform(y_pred + k * np.sqrt(var)))
b = (y_scaler.inverse_transform(y_test) > y_scaler.inverse_transform(y_pred - k * np.sqrt(var)))
prediction_prop[i].append(np.argwhere((a == True) & (b == True)).shape[0] / len(y_test))
# We transform the standardised predictions back to the original data space
y_pred = y_scaler.inverse_transform(y_pred)
y_test = y_scaler.inverse_transform(y_test)
# Compute scores for confidence curve plotting.
ranked_confidence_list = np.argsort(var, axis=0).flatten()
for k in range(len(y_test)):
# Construct the RMSE error for each level of confidence
conf = ranked_confidence_list[0:k+1]
rmse = np.sqrt(mean_squared_error(y_test[conf], y_pred[conf]))
rmse_confidence_list[i, k] = rmse
# Construct the MAE error for each level of confidence
mae = mean_absolute_error(y_test[conf], y_pred[conf])
mae_confidence_list[i, k] = mae
# Output Standardised RMSE and RMSE on Train Set
train_samples = bb_alpha.sample_predictive_distribution(X_train)
y_pred_train = np.mean(train_samples, axis=0)
train_rmse_stan = np.sqrt(mean_squared_error(y_train, y_pred_train))
train_rmse = np.sqrt(mean_squared_error(y_scaler.inverse_transform(y_train), y_scaler.inverse_transform(y_pred_train)))
print("\nStandardised Train RMSE: {:.3f}".format(train_rmse_stan))
print("Train RMSE: {:.3f}".format(train_rmse))
score = r2_score(y_test, y_pred)
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
mae = mean_absolute_error(y_test, y_pred)
print("\nR^2: {:.3f}".format(score))
print("RMSE: {:.3f}".format(rmse))
print("MAE: {:.3f}".format(mae))
r2_list.append(score)
rmse_list.append(rmse)
mae_list.append(mae)
if representation != 'SMILES':
r2_list = np.array(r2_list)
rmse_list = np.array(rmse_list)
mae_list = np.array(mae_list)
print("\nmean R^2: {:.4f} +- {:.4f}".format(np.mean(r2_list), np.std(r2_list)))
print("mean RMSE: {:.4f} +- {:.4f}".format(np.mean(rmse_list), np.std(rmse_list)))
print("mean MAE: {:.4f} +- {:.4f}\n".format(np.mean(mae_list), np.std(mae_list)))
# Plot confidence-error curves
confidence_percentiles = np.arange(1e-14, 100, 100/len(y_test)) # 1e-14 instead of 0 to stop weirdness with len(y_test) = 29
if use_rmse_conf:
rmse_mean = np.mean(rmse_confidence_list, axis=0)
rmse_std = np.std(rmse_confidence_list, axis=0)
# We flip because we want the most confident predictions on the right-hand side of the plot
rmse_mean = np.flip(rmse_mean)
rmse_std = np.flip(rmse_std)
# One-sigma error bars
lower = rmse_mean - rmse_std
upper = rmse_mean + rmse_std
plt.plot(confidence_percentiles, rmse_mean, label='mean')
plt.fill_between(confidence_percentiles, lower, upper, alpha=0.2)
plt.xlabel('Confidence Percentile')
plt.ylabel('RMSE')
plt.ylim([0, np.max(upper) + 1])
plt.xlim([0, 100*((len(y_test) - 1) / len(y_test))])
plt.yticks(np.arange(0, np.max(upper) + 1, 5.0))
plt.savefig(task + '/results/BNN/{}_{}_confidence_curve_rmse.png'.format(representation, task))
plt.show()
else:
# We plot the Mean-absolute error confidence-error curves
mae_mean = np.mean(mae_confidence_list, axis=0)
mae_std = np.std(mae_confidence_list, axis=0)
mae_mean = np.flip(mae_mean)
mae_std = np.flip(mae_std)
lower = mae_mean - mae_std
upper = mae_mean + mae_std
plt.plot(confidence_percentiles, mae_mean, label='mean')
plt.fill_between(confidence_percentiles, lower, upper, alpha=0.2)
plt.xlabel('Confidence Percentile')
plt.ylabel('MAE')
        plt.ylim([0, np.max(upper) + 1])
"""
Returns an array containing the order statistic medians for a given
probability distribution along the X axis in the first Y axis value,
and ordered response data of a given sample along the X axis in the
second Y axis value. The order statistic medians are the percent
point function values of the probability distribution at
regularly-spaced intervals. The ordered response data is the sorted
sample values. If the sample comes from a probability distribution
of the type given, the plot of the ordered response data against the
order statistic medians should give a straight line whose slope is
the scaling of, and whose intercept is the offset from, the given
distribution. Thus, the slope, intercept, and correlation
coefficient (r) of this fitted line are returned as the first three
X elements of the third Y axis value.
"""
from __future__ import print_function
import numpy
import scipy.stats
import pyferret
import pyferret.stats
def ferret_init(id):
"""
Initialization for the stats_probplotvals Ferret PyEF
"""
axes_values = [ pyferret.AXIS_DOES_NOT_EXIST ] * pyferret.MAX_FERRET_NDIM
axes_values[0] = pyferret.AXIS_CUSTOM
axes_values[1] = pyferret.AXIS_CUSTOM
false_influences = [ False ] * pyferret.MAX_FERRET_NDIM
retdict = { "numargs": 3,
"descript": "Returns [j=1] order statistic medians, " \
"[j=2] ordered response data, and " \
"[j=3] slope, intercept, and corr. coeff. of fitted line",
"axes": axes_values,
"argnames": ("SAMPLE", "PDNAME", "PDPARAMS"),
"argdescripts": ("Sample values for the ordered response data",
"Name of a continuous probability distribution for the order statistic medians",
"Parameters for this continuous probability distribution"),
"argtypes": (pyferret.FLOAT_ARRAY, pyferret.STRING_ONEVAL, pyferret.FLOAT_ARRAY),
"influences": (false_influences, false_influences, false_influences),
}
return retdict
def ferret_custom_axes(id):
"""
    Custom axis information for the stats_probplotvals Ferret PyEF
"""
size = 1
for axis in ( pyferret.X_AXIS, pyferret.Y_AXIS, pyferret.Z_AXIS,
pyferret.T_AXIS, pyferret.E_AXIS, pyferret.F_AXIS ):
axis_info = pyferret.get_axis_info(id, pyferret.ARG1, axis)
# Note: axes normal to the data have size = -1
num = axis_info.get("size", -1)
if num > 1:
size *= num
axis_defs = [ None ] * pyferret.MAX_FERRET_NDIM
axis_defs[0] = (1, size, 1, "VALUE_NUM", False, )
axis_defs[1] = (1, 3, 1, "OSM,ORD,P", False, )
return axis_defs
def ferret_compute(id, result, resbdf, inputs, inpbdfs):
"""
Assigns to result[:,0] the order statistic medians for
the probability distribution named in inputs[1] with
parameters given in inputs[2]. Assigns to result[:,1]
the ordered response data of the sample values given in
inputs[0]. Assigns to result[:3,2] the slope, intercept,
and correlation coefficient of the line fitted to a plot
of result[:,1] against result[:,0]. Undefined values
in inputs[0] are removed at the beginning of this
computation.
"""
distribname = inputs[1]
distname = pyferret.stats.getdistname(distribname)
if distname is None:
raise ValueError("Unknown probability function %s" % distribname)
distribparams = inputs[2].reshape(-1)
distparams = pyferret.stats.getdistparams(distname, distribparams)
if distparams is None:
raise ValueError("Unknown (for params) probability function %s" % distribname)
sample = inputs[0].reshape(-1)
badmask = ( numpy.fabs(sample - inpbdfs[0]) < 1.0E-5 )
goodmask = numpy.logical_not(numpy.logical_or(badmask, numpy.isnan(sample)))
ppdata = scipy.stats.probplot(sample[goodmask], distparams, distname, fit=1)
result[:] = resbdf
result[goodmask,0,0,0,0,0] = ppdata[0][0]
result[goodmask,1,0,0,0,0] = ppdata[0][1]
result[:3,2,0,0,0,0] = ppdata[1]
#
# The rest of this is just for testing this module at the command line
#
if __name__ == "__main__":
# make sure ferret_init does not have problems
info = ferret_init(0)
# Sample from a normal distribution
ydim = 23
zdim = 13
inpundefval = -1.0E+10
outundefval = -2.0E+10
# select a random sample from a normal distribution
size = ydim * zdim
sample = scipy.stats.norm(5.0, 2.0).rvs(size)
ordata = numpy.sort(sample)
# compare to the standard normal distribution (mu = 0.0, sigma = 1.0)
uvals = numpy.empty(size)
uvals[-1] = numpy.power(0.5, 1.0 / size)
uvals[0] = 1.0 - uvals[-1]
uvals[1:-1] = (numpy.arange(2.0, size-0.5, 1.0) - 0.3175) / (size + 0.365)
osmeds = scipy.stats.norm(0.0, 1.0).ppf(uvals)
# set up for a call to ferret_compute
pfname = "norm"
    pfparams = numpy.array([0.0, 1.0], dtype=numpy.float64)
import sys
sys.path.append("../")
import numpy as np
from tensorflow.keras.models import model_from_json
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import logging
from PIL import Image
import urllib.request
labels = {0: 'guinness',
1: 'hop-house',
2: 'fosters',
3: 'carlsberg',
4: 'becks',
5: 'corona',
6: 'heineken',
7: 'paulaner',
8: 'no-logo'}
def load_logo_model(model):
"""
load the saved trained logo detection model
"""
# logging.critical("Loading logo detection model...")
json_file = open(f'{model}.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(f"{model}.h5")
# logging.critical("Model is ready.")
return loaded_model
model = load_logo_model('beer_logo_model')
def logo_detection(image_url):
"""
    Detects beer logos in an image.
    image_url: URL of the post image (str)
    return: the detected logo label, or 'no-logo' (str)
"""
# load image from the url
img = Image.open(urllib.request.urlopen(image_url))
# trasnform to a desireable tensor for the model
    img = img.resize((224, 224), Image.LANCZOS)  # LANCZOS replaces the removed ANTIALIAS alias
x = img_to_array(img)/255.
x = x.reshape((1,) + x.shape)
# prediction
result = model.predict(x)
    prediction = np.argmax(result)
    return labels[prediction]
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Time:
2021-08-23 7:28 下午
Author:
huayang
Subject:
"""
import doctest
import warnings
from typing import Union
import numpy as np
__all__ = [
'safe_indexing',
'split',
]
def shuffle(rows, random_seed=None):
    """Shuffle `rows` with an optional fixed seed."""
    rs = np.random.RandomState(random_seed)
    idx = rs.permutation(len(rows))
    return [rows[i] for i in idx]
__license__ = """
Copyright (c) 2012 mpldatacursor developers
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import matplotlib.transforms as mtransforms
from mpl_toolkits import mplot3d
#-- Artist-specific pick info functions --------------------------------------
def _coords2index(im, x, y, inverted=False):
"""
Converts data coordinates to index coordinates of the array.
Parameters
-----------
im : An AxesImage instance
The image artist to operation on
x : number
The x-coordinate in data coordinates.
y : number
The y-coordinate in data coordinates.
inverted : bool, optional
If True, convert index to data coordinates instead of data coordinates
to index.
Returns
--------
i, j : Index coordinates of the array associated with the image.
"""
xmin, xmax, ymin, ymax = im.get_extent()
if im.origin == 'upper':
ymin, ymax = ymax, ymin
data_extent = mtransforms.Bbox([[ymin, xmin], [ymax, xmax]])
array_extent = mtransforms.Bbox([[0, 0], im.get_array().shape[:2]])
trans = mtransforms.BboxTransformFrom(data_extent) +\
mtransforms.BboxTransformTo(array_extent)
if inverted:
trans = trans.inverted()
return trans.transform_point([y,x]).astype(int)
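# Minimal sketch (not part of the original module): map a data coordinate on a
# small image to its array indices. The pyplot import is local because the
# module itself never uses pyplot.
def demo_coords2index():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    im = ax.imshow(np.arange(12).reshape(3, 4), extent=(0, 4, 0, 3))
    return _coords2index(im, x=2.5, y=1.5)  # -> (i, j) index pair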
def image_props(event):
"""
Get information for a pick event on an ``AxesImage`` artist. Returns a dict
of "i" & "j" index values of the image for the point clicked, and "z": the
(uninterpolated) value of the image at i,j.
Parameters
-----------
event : PickEvent
The pick event to process
Returns
--------
props : dict
A dict with keys: z, i, j
"""
x, y = event.mouseevent.xdata, event.mouseevent.ydata
i, j = _coords2index(event.artist, x, y)
z = event.artist.get_array()[i,j]
if z.size > 1:
# Override default numpy formatting for this specific case. Bad idea?
z = ', '.join('{:0.3g}'.format(item) for item in z)
return dict(z=z, i=i, j=j)
def line_props(event):
"""
Get information for a pick event on a Line2D artist (as created with
``plot``.)
This will yield x and y values that are interpolated between vertices
(instead of just being the position of the mouse) or snapped to the nearest
vertex if only the vertices are drawn.
Parameters
-----------
event : PickEvent
The pick event to process
Returns
--------
props : dict
A dict with keys: x & y
"""
xclick, yclick = event.mouseevent.xdata, event.mouseevent.ydata
i = event.ind[0]
xorig, yorig = event.artist.get_xydata().T
# For points-only lines, snap to the nearest point.
linestyle = event.artist.get_linestyle()
if linestyle in ['none', ' ', '', None, 'None']:
return dict(x=xorig[i], y=yorig[i])
# ax.step is actually implemented as a Line2D with a different drawstyle...
xs_data = xorig[max(i - 1, 0) : i + 2]
ys_data = yorig[max(i - 1, 0) : i + 2]
drawstyle = event.artist.drawStyles[event.artist.get_drawstyle()]
if drawstyle == "_draw_lines":
pass
elif drawstyle == "_draw_steps_pre":
xs_data = _interleave(xs_data, xs_data[:-1])
ys_data = _interleave(ys_data, ys_data[1:])
elif drawstyle == "_draw_steps_post":
xs_data = _interleave(xs_data, xs_data[1:])
ys_data = _interleave(ys_data, ys_data[:-1])
elif drawstyle == "_draw_steps_mid":
mid_xs = (xs_data[:-1] + xs_data[1:]) / 2
        xs_data = _interleave(xs_data, np.column_stack([mid_xs, mid_xs]))
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class Scaler(object):
"""
Iterative estimation of row and column centering/scaling
using the algorithm from page 31 of:
Matrix Completion and Low-Rank SVD via Fast Alternating Least Squares
"""
def __init__(
self,
center_columns=True,
scale_columns=True,
min_value=None,
max_value=None,
verbose=True):
self.center_columns = center_columns
self.scale_columns = scale_columns
self.min_value = min_value
self.max_value = max_value
self.verbose = verbose
self.column_centers = None
self.column_scales = None
def fit(self, X):
if self.center_columns:
self.column_centers = np.nanmean(X, axis=0)
if self.scale_columns:
self.column_scales = np.nanstd(X, axis=0)
self.column_scales[self.column_scales == 0] = 1.0
return self
def transform(self, X):
X = np.asarray(X).copy()
if self.center_columns:
X -= self.column_centers
if self.scale_columns:
X /= self.column_scales
return X
def fit_transform(self, X):
self.fit(X)
return self.transform(X)
def inverse_transform(self, X):
X = np.asarray(X).copy()
if self.scale_columns:
X *= self.column_scales
if self.center_columns:
X += self.column_centers
return X
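# Usage sketch with synthetic data (not part of the original module): a round
# trip through Scaler leaves missing entries as NaN and recovers the input.
def demo_scaler_roundtrip():
    X = np.array([[1.0, 10.0], [2.0, np.nan], [3.0, 30.0]])
    scaler = Scaler(verbose=False)
    Z = scaler.fit_transform(X)          # column-centered and scaled
    return scaler.inverse_transform(Z)   # approximately recovers X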
class BiScaler(object):
"""
Iterative estimation of row and column centering/scaling
using the algorithm from page 31 of:
Matrix Completion and Low-Rank SVD via Fast Alternating Least Squares
"""
def __init__(
self,
center_rows=True,
center_columns=True,
scale_rows=True,
scale_columns=True,
min_value=None,
max_value=None,
max_iters=100,
tolerance=0.001,
verbose=True):
self.center_rows = center_rows
self.center_columns = center_columns
self.scale_rows = scale_rows
self.scale_columns = scale_columns
self.min_value = min_value
self.max_value = max_value
self.max_iters = max_iters
self.tolerance = tolerance
self.verbose = verbose
def estimate_row_means(
self,
X,
observed,
column_means,
column_scales):
"""
row_center[i] =
sum{j in observed[i, :]}{
(1 / column_scale[j]) * (X[i, j] - column_center[j])
}
------------------------------------------------------------
sum{j in observed[i, :]}{1 / column_scale[j]}
"""
n_rows, n_cols = X.shape
column_means = np.asarray(column_means)
if len(column_means) != n_cols:
raise ValueError("Expected length %d but got shape %s" % (
n_cols, column_means.shape))
X = X - column_means.reshape((1, n_cols))
column_weights = 1.0 / column_scales
X *= column_weights.reshape((1, n_cols))
row_means = np.zeros(n_rows, dtype=X.dtype)
row_residual_sums = np.nansum(X, axis=1)
for i in range(n_rows):
row_mask = observed[i, :]
sum_weights = column_weights[row_mask].sum()
row_means[i] = row_residual_sums[i] / sum_weights
return row_means
def estimate_column_means(
self,
X,
observed,
row_means,
row_scales):
"""
column_center[j] =
sum{i in observed[:, j]}{
(1 / row_scale[i]) * (X[i, j]) - row_center[i])
}
------------------------------------------------------------
sum{i in observed[:, j]}{1 / row_scale[i]}
"""
n_rows, n_cols = X.shape
        row_means = np.asarray(row_means)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
testManifoldFirstOrder.py
Implement Posa's approach with a first-order system.
For now I give up on analytic gradients and simply subclass probFun for a quick implementation.
It turns out that Posa's approach does not work well for such a simple problem; I have to think more.
Is this problem simply too simple for this approach to handle, or is the initial guess simply too bad?
I can test this by providing a guess from a collocation approach without special care on velocity/acceleration.
"""
import sys, os, time
import numpy as np
import matplotlib.pyplot as plt
import logging
sys.path.append('../')
from trajOptLib.io import getOnOffArgs
from trajOptLib import daeSystem, trajOptCollocProblem
from trajOptLib import lqrObj, nonLinearObj
from trajOptLib import system
from trajOptLib import manifoldConstr, nonLinearPointConstr
from trajOptLib import snoptConfig, solver, probFun
from trajOptLib import trajOptCollocProblem
from trajOptLib.utility import showSol
from carCommon import OmniCar, FirstOrderOmniCar, CircleConstr, CollocCircleConstr
class penObj(nonLinearObj):
"""Penalty function."""
def __init__(self, prob):
self.prob = prob
nonLinearObj.__init__(self, prob.nx, nG=0)
self.weight_u = 1
self.weight_lmd = 1
def __callg__(self, x, y, ):
psx = self.prob.parseX(x)
obj = self.weight_u * np.sum(psx['U']) + self.weight_lmd * np.sum(psx['P'])
y[0] = obj
class CarProb(probFun):
"""This class is an implementation of mPosa's approach on a simple car problem.
It calculates no gradient and does not inherent from those complex classes.
This is only used for prototype."""
def __init__(self, sys, con, x0, xf, N, h):
"""We assume a fixed time stuff, a first order system.
We use simplified version, we only optimize state (no derivative) at knots, impose dyn constr at collocation.
We only impose manifold constraint on knots, we correct dyn constr at collocation points
Parameters
----------
sys : the system instance, it gives information on a few dimensions
con : the manifold constraint
x0 : initial state
xf : final state
N : discretization size
h : grid size
"""
self.N = N
self.h = h
self.x0 = x0
self.xf = xf
self.sys = sys
self.dimx = sys.nx
self.dimu = sys.nu
self.dimp = sys.np
self.con = con
self.nc = con.nc # it equals the correction gamma
self.nc_man = con.nf
# construct problem
numSol = N * (sys.nx + sys.nu + sys.np) + (N - 1) * (self.dimp + self.nc)
        numF = 1 + (N - 1) * sys.nx + (N - 2) * self.nc_man + 2 * self.nc  # the 2*nc terms impose the acceleration-level manifold constraint at the initial and final knots
# possible update, make xc be on manifold, and let xm (from integration) be closest to the manifold
# numF += (N - 1) * self.nc_man
probFun.__init__(self, numSol, numF)
# set bounds for them
self.setBounds()
def __callf__(self, x, y):
psx = self.parseX(x)
psf = self.parseF(y)
X = psx['X']
U = psx['U']
P = psx['P']
Lmd = psx['Lmd']
Gamma = psx['Gamma']
obj = psf['obj']
dyn = psf['dyn']
man_mid = psf['man_mid']
man_acce_0 = psf['man_acce_0']
man_acce_f = psf['man_acce_f']
# calculate obj, it is lqr cost
obj[0] = np.sum(U**2) + (np.sum(P**2))
# impose dyn constr
for i in range(self.N - 1):
if i == 0:
dx0 = self.sys.dyn(0, X[i], U[i], P[i])
else:
dx0 = dx1
dx1 = self.sys.dyn(0, X[i+1], U[i+1], P[i+1])
if i < self.N - 2: # impose manifold constraints for knot points
catX = np.concatenate((X[i + 1], dx1))
self.con.__callf__(catX, man_mid[i])
xmid = 0.5*(X[i]+X[i+1])+self.h/8*(dx0 - dx1)
xmiddot = 1.5/self.h*(X[i+1] - X[i]) - 0.25*(dx0 + dx1)
umid = (U[i] + U[i+1])/2
dxm = self.sys.dyn(0, xmid, umid, Lmd[i])
corr = self.con.__return_correction__(xmid, Gamma[i])
            dxm[:self.dimx // 2] += corr  # integer division keeps this valid under Python 3
dyn[i] = xmiddot - dxm
# impose acce constr on
dx0 = self.sys.dyn(0, X[0], U[0], P[0])
dxf = self.sys.dyn(0, X[-1], U[-1], P[-1])
self.con.__callf__(np.concatenate((X[0], dx0)), man_acce_0, acce=True)
self.con.__callf__(np.concatenate((X[-1], dxf)), man_acce_f, acce=True)
def setBounds(self):
xlb = -1e20*np.ones(self.nx)
xub = -xlb
lb = np.zeros(self.nf)
ub = np.zeros(self.nf)
psxlb = self.parseX(xlb)
psxub = self.parseX(xub)
psxlb['X'][0, :self.dimx] = self.x0
psxub['X'][0, :self.dimx] = self.x0
psxlb['X'][-1, :self.dimx] = self.xf
psxub['X'][-1, :self.dimx] = self.xf
# psxub['Lmd'][:] = 0 # lmd should be negative
# psxub['P'][:] = 0 # lmd should be negative
self.lb = lb
self.ub = ub
self.xlb = xlb
self.xub = xub
def parseX(self, x):
"""Parse a long vector x into parts"""
n0, n1 = 0, self.N * (self.dimx + self.dimu + self.dimp)
XUP = np.reshape(x[:n1], (self.N, self.dimx+self.dimu+self.dimp)) # state part
X = XUP[:, :self.dimx]
U = XUP[:, self.dimx:self.dimx+self.dimu]
P = XUP[:, self.dimx+self.dimu:self.dimx+self.dimu+self.dimp]
n0 = n1
n1 = n0 + (self.N - 1) * self.dimp # support force
Lmd = np.reshape(x[n0:n1], (self.N - 1, self.dimp))
n0 = n1
# Gamma term
n1 = n0 + (self.N - 1) * self.nc
Gamma = np.reshape(x[n0:n1], (self.N - 1, self.nc))
assert n1 == self.nx
return {'X': X, 'U': U, 'P': P, 'Lmd': Lmd, 'Gamma': Gamma}
def parseF(self, f):
"""Parse f"""
obj = f[0:1]
n0 = 1
n1 = n0 + self.dimx * (self.N - 1)
        dyn = np.reshape(f[n0:n1], (self.N - 1, self.dimx))
import numpy as np
from PIL import Image
import xml.etree.ElementTree as ET
import scipy.misc as scm
def crop_center(img, bboxes, cropx,cropy):
y,x = img.shape
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
new_img = img[starty:starty+cropy,startx:startx+cropx]
bboxes[:, 0]-=starty
bboxes[:, 1]-=startx
bboxes[:, 2]-=0
bboxes[:, 3]-=0
return new_img, bboxes
def read_data(xml_path, img_path, CLASSES):
tree = ET.parse(xml_path)
root = tree.getroot()
objects = root.findall("object")
names = []
gtbboxes = np.zeros([len(objects), 4], dtype=np.int32)
for idx, obj in enumerate(objects):
curr_name = obj.find("name").text
if curr_name not in CLASSES:
continue
names.append(curr_name)
xmin = int(obj.find("bndbox").find("xmin").text)
xmax = int(obj.find("bndbox").find("xmax").text)
ymin = int(obj.find("bndbox").find("ymin").text)
ymax = int(obj.find("bndbox").find("ymax").text)
gtbboxes[idx, 0] = (xmin + xmax)//2
gtbboxes[idx, 1] = (ymin + ymax)//2
gtbboxes[idx, 2] = xmax - xmin
gtbboxes[idx, 3] = ymax - ymin
    #img = np.array(Image.open(img_path))
img = np.array(scm.imread(img_path))
labels = np.zeros([len(objects)])
for idx, name in enumerate(names):
labels[idx] = CLASSES.index(name)
return img, gtbboxes, labels
def resize_img_bbox(img, bboxes, IMG_W, IMG_H, channels, crop=False):
if crop:
img,bboxes = crop_center(img,bboxes,400,400)
img_h, img_w = img.shape[0], img.shape[1]
resized_bboxes = np.zeros_like(bboxes)
resized_bboxes[:, 0] = IMG_W * bboxes[:, 0] / img_w
resized_bboxes[:, 1] = IMG_H * bboxes[:, 1] / img_h
resized_bboxes[:, 2] = IMG_W * bboxes[:, 2] / img_w
resized_bboxes[:, 3] = IMG_H * bboxes[:, 3] / img_h
resized_img = scm.imresize(img, [IMG_H, IMG_W, channels])
resized_img = np.reshape(resized_img, [IMG_H,IMG_W,channels])
#np.array(Image.fromarray(img).resize([IMG_W, IMG_H]))
return resized_img, resized_bboxes
def pre_process(img):
img = img / 255.0
img = img - 0.5
img = img * 2.0
return img
def cal_ious(anchors, gtbboxes):
anchors = anchors[np.newaxis, :, :]
gtbboxes = gtbboxes[:, np.newaxis, :]
anchors_x1 = anchors[:, :, 0] - anchors[:, :, 2] / 2
anchors_x2 = anchors[:, :, 0] + anchors[:, :, 2] / 2
anchors_y1 = anchors[:, :, 1] - anchors[:, :, 3] / 2
anchors_y2 = anchors[:, :, 1] + anchors[:, :, 3] / 2
gtbboxes_x1 = gtbboxes[:, :, 0] - gtbboxes[:, :, 2] / 2
gtbboxes_x2 = gtbboxes[:, :, 0] + gtbboxes[:, :, 2] / 2
gtbboxes_y1 = gtbboxes[:, :, 1] - gtbboxes[:, :, 3] / 2
gtbboxes_y2 = gtbboxes[:, :, 1] + gtbboxes[:, :, 3] / 2
inter_x1 = np.maximum(anchors_x1, gtbboxes_x1)
inter_x2 = np.minimum(anchors_x2, gtbboxes_x2)
inter_y1 = np.maximum(anchors_y1, gtbboxes_y1)
inter_y2 = np.minimum(anchors_y2, gtbboxes_y2)
inter_area = np.maximum(0., inter_x2 - inter_x1) * np.maximum(0., inter_y2 - inter_y1)
union_area = anchors[:, :, 2] * anchors[:, :, 3] + gtbboxes[:, :, 2] * gtbboxes[:, :, 3] - inter_area
ious = inter_area / union_area
return ious
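# Quick numeric check (illustrative, not in the original module): two unit
# boxes in (cx, cy, w, h) form, offset by half a width, overlap with IoU = 1/3.
def demo_cal_ious():
    anchors = np.array([[0.5, 0.5, 1.0, 1.0]])
    gtbboxes = np.array([[1.0, 0.5, 1.0, 1.0]])
    return cal_ious(anchors, gtbboxes)  # -> [[0.333...]]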
def generate_minibatch(proposal, gtbboxes, classes, IMG_W, IMG_H, MINIBATCH, CLASSES):
#gtbboxes: [None, 4]
proposal_x1 = proposal[:, 0] - proposal[:, 2]/2
proposal_x2 = proposal[:, 0] + proposal[:, 2]/2
proposal_y1 = proposal[:, 1] - proposal[:, 3]/2
proposal_y2 = proposal[:, 1] + proposal[:, 3]/2
proposal_x1[proposal_x1 < 0.] = 0
proposal_x2[proposal_x2 >= IMG_W] = IMG_W - 1
proposal_y1[proposal_y1 < 0.] = 0
proposal_y2[proposal_y2 >= IMG_H] = IMG_H - 1
x, y = (proposal_x1 + proposal_x2) / 2, (proposal_y1 + proposal_y2) / 2
w, h = proposal_x2 - proposal_x1, proposal_y2 - proposal_y1
proposal = np.stack((x, y, w, h), axis=1)
ious = cal_ious(proposal, gtbboxes)#[nums_obj, nums_anchor]
max_iou_idx = np.where(np.abs(ious - np.max(ious, axis=1, keepdims=True)) < 1e-3)[1]
ious = np.max(ious, axis=0)
iou_greater_5_idx = np.where(ious >= 0.5)[0]
pos_idx = np.union1d(max_iou_idx, iou_greater_5_idx)
neg_idx = np.where(ious < 0.5)[0]
neg_idx_ = np.where(ious >= 0.1)[0]
neg_idx = np.intersect1d(neg_idx, neg_idx_)
neg_idx = np.setdiff1d(neg_idx, max_iou_idx)#remove some bboxes that may be iou < 0.1, but they are the maxest overlapping
pos_nums = pos_idx.shape[0]
neg_nums = neg_idx.shape[0]
if neg_nums == 0:
neg_idx = np.where(ious < 0.3)[0]
neg_nums = neg_idx.shape[0]
print('HERE....', neg_nums, pos_nums)
if pos_nums < MINIBATCH//4:
remain_nums = MINIBATCH - pos_nums
rand_idx = np.random.randint(0, neg_nums, [remain_nums])
mini_batch_pos = proposal[pos_idx]
mini_batch_neg = proposal[neg_idx[rand_idx]]
mini_batch = np.concatenate((mini_batch_pos, mini_batch_neg), axis=0)
mask = np.concatenate((np.ones([pos_nums]), np.zeros([remain_nums])))
pos_iou = cal_ious(mini_batch_pos, gtbboxes)
pos_gt_idx = np.argmax(pos_iou, axis=0)
pos_gt_bbox = gtbboxes[pos_gt_idx]
pos_classes = classes[pos_gt_idx]
else:
rand_pos_idx = np.random.randint(0, pos_nums, [MINIBATCH//4])
rand_neg_idx = np.random.randint(0, neg_nums, [MINIBATCH * 3//4])
mini_batch_pos = proposal[pos_idx[rand_pos_idx]]
mini_batch_neg = proposal[neg_idx[rand_neg_idx]]
mini_batch = np.concatenate((mini_batch_pos, mini_batch_neg), axis=0)
mask = np.concatenate((np.ones([MINIBATCH//4]), np.zeros([MINIBATCH * 3//4])), axis=0)
pos_iou = cal_ious(mini_batch_pos, gtbboxes)
pos_gt_idx = np.argmax(pos_iou, axis=0)
pos_gt_bbox = gtbboxes[pos_gt_idx]
pos_classes = classes[pos_gt_idx]
target_bbox = bbox2offset(mini_batch_pos, pos_gt_bbox)
init_target_bbox = np.zeros([MINIBATCH, 4])
init_target_classes = np.ones([MINIBATCH]) * len(CLASSES)
init_target_classes[:pos_classes.shape[0]] = pos_classes
init_target_bbox[:target_bbox.shape[0]] = target_bbox
return mini_batch, mask, init_target_bbox, init_target_classes
def offset2bbox_np(pred_t, anchor_idx, anchors):
anchors = anchors[np.int32(anchor_idx)]
pred_t = pred_t[:anchor_idx.shape[0]]
pred_bbox_x = pred_t[:, 0:1] * anchors[:, 2:3] + anchors[:, 0:1]
pred_bbox_y = pred_t[:, 1:2] * anchors[:, 3:4] + anchors[:, 1:2]
pred_bbox_w = np.exp(pred_t[:, 2:3]) * anchors[:, 2:3]
pred_bbox_h = np.exp(pred_t[:, 3:4]) * anchors[:, 3:4]
return np.concatenate((pred_bbox_x, pred_bbox_y, pred_bbox_w, pred_bbox_h), axis=-1)
def bbox2offset(anchor_bbox, gt_bbox):
t_x = (gt_bbox[:, 0:1] - anchor_bbox[:, 0:1])/anchor_bbox[:, 2:3]
t_y = (gt_bbox[:, 1:2] - anchor_bbox[:, 1:2])/anchor_bbox[:, 3:4]
t_w = np.log(gt_bbox[:, 2:3] / anchor_bbox[:, 2:3])
t_h = np.log(gt_bbox[:, 3:4] / anchor_bbox[:, 3:4])
return np.concatenate([t_x, t_y, t_w, t_h], axis=-1)
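# Round-trip sanity check for the two helpers above (hand-picked numbers, not
# from any dataset): encoding a ground-truth box against an anchor and then
# decoding it should reproduce the box up to floating-point error.
if __name__ == "__main__":
    _anchor = np.array([[50., 50., 20., 20.]])
    _gt = np.array([[55., 48., 24., 18.]])
    _t = bbox2offset(_anchor, _gt)
    print(offset2bbox_np(_t, np.array([0]), _anchor))  # ~ [[55. 48. 24. 18.]]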
def pre_process_xml1(XML_PATH):
#print("............HERE............")
xml_names = os.listdir(XML_PATH)
#print("...........Initial number of xmls:", len(xml_names))
final_xml = []
for i in range(len(xml_names)):
filename = os.path.join(XML_PATH, xml_names[i])
try:
tree = ET.parse(filename)
except:
import pdb
pdb.set_trace()
root = tree.getroot()
objects = root.findall("object")
names = []
for idx, obj in enumerate(objects):
curr_name = obj.find("name").text
if curr_name in CLASSES:
names.append(curr_name)
if len(names)!=0:
final_xml.append(xml_names[i])
#print("...........Final number of xmls:", len(final_xml))
return final_xml
def read_batch(proposals, CLASSES, xml_names, XML_PATH, IMG_PATH, BATCHSIZE, MINIBATCH, IMG_H, IMG_W, CROP, SUFFIX):
rand_idx = np.random.randint(0, len(xml_names), [BATCHSIZE])
batch_imgs = np.zeros([BATCHSIZE, IMG_H, IMG_W, 1])
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import csv, os
from scipy.stats.kde import gaussian_kde
class protein_length(object):
'''
Probability distributions of protein lenght across different organisms.
Protein length is calculated in amino acids (AA), based on the
coding sequence in the genome.
ABUNDANCE WEIGHTED PDF:
-kernel-density estimates using Gaussian kernels
-histograms with 50 AA bin width. Histograms are normalized
such that the integral of the histograms will sum to 1
to form a probability density
'''
def __init__(self, length, abundance, xlim=5000, ylim=1):
self.xlim = xlim
self.ylim = ylim
self.length = length #protein length in amino acids
self.abundance = abundance  # protein abundance - relative within samples
def genomic_dist(self, ax, label='', draw_hist=True, draw_KDE=True,
KDE_color='r', hist_color='0.6'):
'''
params:
- ax: matplotlib axis to draw plot in
- draw_hist: draws normalized histogram with 50 AA bins
- draw_KDE: draws gaussian based kernel density estimate of data
'''
ax.set_facecolor('#FFE6C0')  # set_axis_bgcolor was removed in Matplotlib 2.2
if draw_hist:
ax.hist(self.length, histtype='stepfilled',
color=hist_color, edgecolor='none', lw=2,
bins=range(0, np.max(self.length) + 50, 50))  # 50 AA bin width, per the class docstring
"""
Copyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
Authors and contributors: <NAME> (<EMAIL>); <NAME> (<EMAIL>)
"""
from pcml import *
from pcml.util.LayerBuilder import *
from numpy.ma import allequal
from os import path
import numpy as np
import cml_test
import unittest
# Preliminary tests for spatial operations
class TestLayerOperationsSerial(cml_test.PCMLSerialTestCase):
def setUp(self):
super(TestLayerOperationsSerial, self).setUp()
self.datadir = './data'
self.l1 = lst_to_layer([[1]*4]*4)
self.l2 = lst_to_layer([[2]*4]*4)
self.l3 = lst_to_layer([[5]*4]*4)
self.l4 = lst_to_layer([[normolized_value(1.53)] * 13] * 9)
# l5 = l1+(l2+l3)*l4
self.l5 = lst_to_layer([[normolized_value(11.71)] * 4] * 4)
self.l6 = lst_to_layer([range(1,5)] * 4)
self.l7 = lst_to_layer([[2, 2.5, 2.5, 3]] * 4)
self.l8 = ReadASCIIGrid(path.join(self.datadir, 'data_c.asc'))
self.l9 = Layer(0,0,100, 100, 'notitle')
self.l9.set_nparray(np.ones((100,100)),.5,-999)
self.l10 = Layer(0,0,100,100,'notitle')
self.l11 = Layer(0,0,100, 100, 'notitle')
pt_lst = [{'x':-81.4479691,'y':41.0593074,'z':1}
,{'x':-81.5135,'y':41.0293074,'z':1}
,{'x':-81.4779691,'y':41.0503074,'z':1}
,{'x':-81.3779691,'y':41.0303074,'z':1}
,{'x':-81.409691,'y':41.103074,'z':1}
,{'x':-81.51079691,'y':41.08893074,'z':1}
,{'x':-81.4779691,'y':41.0573074,'z':1}]
self.l10.set_pointlist(pt_lst)
self.l10.nrows = self.l9.nrows
self.l10.ncols = self.l9.ncols
arr11=np.array([[1,2,3,1],[1,2,3,2],[1,3,2,4],[1,3,2,1]])
self.l11.set_nparray(arr11,5,-999)
# To ensure FocalMean Operation gives the correct output with different layers
def test_focalmean(self):
lo = FocalMean(self.l1, buffersize=1)
self.assertTrue(allequal(lo._data, self.l1._data), "FocalMean validation failed")
lo = FocalMean(self.l4, buffersize=1)
self.assertTrue(np.allclose(lo._data, self.l4._data))
# To ensure FocalMean Operation gives the correct output with different buffersizes
lo = FocalMean(self.l6, buffersize=2)
self.assertTrue(np.allclose(lo._data, self.l7._data))
# To ensure FocalMean Columndecompostion gives the correct output with different buffer sizes
def test_focalmean_coldecomp(self):
lo = FocalMean(self.l1, buffersize=1,decomposition=columndecomposition)
self.assertTrue(allequal(lo._data, self.l1._data), "FocalMean validation failed")
lo = FocalMean(self.l4, buffersize=1,decomposition=columndecomposition)
self.assertTrue(np.allclose(lo._data, self.l4._data))
lo = FocalMean(self.l6, buffersize=2,decomposition=columndecomposition)
self.assertTrue(np.allclose(lo._data, self.l7._data))
# To ensure FocalMean Operation with numpy implementation gives the correct output with different buffer sizes
def test_focalmean_np(self):
lo = FocalMean_np(self.l1, buffersize=1)
self.assertTrue(allequal(lo._data, self.l1._data), "FocalMean_np validation failed")
lo = FocalMean_np(self.l4, buffersize=1)
self.assertTrue(np.allclose(lo._data, self.l4._data))
lo = FocalMean_np(self.l6, buffersize=2)
self.assertTrue(np.allclose(lo._data, self.l7._data))
# To ensure FocalMaximum Operation gives the correct output with different layers
def test_focalmaximum(self):
lo = FocalMaximum(self.l1,self.l2, buffersize=0)
self.assertTrue(allequal(lo._data, self.l2._data))
lo = FocalMaximum(self.l1,self.l2, buffersize=2)
self.assertTrue(allequal(lo._data, self.l2._data))
lo = FocalMaximum(self.l1,self.l2, buffersize=2,decomposition=columndecomposition)
self.assertTrue(allequal(lo._data, self.l2._data))
# To ensure FocalMinimum Operation gives the correct output with different buffer sizes
def test_focalminimum(self):
lo = FocalMinimum(self.l1,self.l2, buffersize=0)
self.assertTrue(allequal(lo._data, self.l1._data))
lo = FocalMinimum(self.l1,self.l2, buffersize=2)
self.assertTrue(allequal(lo._data, self.l1._data))
lo1=FocalMaximum(self.l1,lo, buffersize=1)
self.assertTrue(allequal(lo1._data,lo._data))
lo1=FocalMaximum(self.l1,lo, buffersize=1,decomposition=columndecomposition)
self.assertTrue(allequal(lo1._data,lo._data))
# To ensure FocalMaximum Operation with numpy gives the correct output with different buffer sizes
def test_focalmaximum_np(self):
lo = FocalMaximum_np(self.l1,self.l2, buffersize=0)
self.assertTrue(allequal(lo._data, self.l2._data))
lo = FocalMaximum_np(self.l1,self.l2, buffersize=2)
self.assertTrue(allequal(lo._data, self.l2._data))
lo1=FocalMaximum_np(self.l1,lo, buffersize=1)
self.assertTrue(allequal(lo1._data,lo._data))
lo1=FocalMaximum(self.l1,lo, buffersize=1,decomposition=columndecomposition)
self.assertTrue(allequal(lo1._data,lo._data))
# To ensure FocalMinimum Operation with numpy gives the correct output with different buffer sizes
def test_focalminimum_np(self):
lo = FocalMinimum_np(self.l1,self.l2, buffersize=0)
self.assertTrue(allequal(lo._data, self.l1._data))
lo = FocalMinimum_np(self.l1,self.l2, buffersize=2)
self.assertTrue(allequal(lo._data, self.l1._data))
lo1=FocalMinimum_np(self.l1,lo, buffersize=1)
self.assertTrue(allequal(lo1._data,lo._data))
lo1=FocalMaximum(self.l1,lo, buffersize=1,decomposition=columndecomposition)
self.assertTrue(allequal(lo1._data,lo._data))
# To ensure FocalMajority Operation gives the correct output with different buffer sizes
def test_focalMajority(self):
lo = FocalMajority(self.l1, buffersize=0)
self.assertTrue(allequal(lo._data, self.l1._data))
lo = FocalMajority(self.l1, buffersize=1)
self.assertTrue(allequal(lo._data, self.l1._data))
res = np.asarray([[1,1,2,3],[1,1,2,3],[1,1,2,2],[1,1,3,2]])
lo = FocalMajority(self.l11, buffersize=1)
self.assertTrue(allequal(res,lo._data))
lo = FocalMajority(self.l11, buffersize=1,decomposition=columndecomposition)
self.assertTrue(allequal(res,lo._data))
# To ensure FocalPercentage Operation gives the correct output with different buffer sizes
def test_focalpercentage(self):
lo = FocalPercentage(self.l1, buffersize=1)
res = np.asarray([[100]*4]*4)
self.assertTrue(allequal(lo._data, res))
lo = FocalPercentage(self.l2, buffersize=3,decomposition=columndecomposition)
self.assertTrue(allequal(lo._data, res))
# To ensure FocalMean Operation with numpy by executor gives the correct output with different buffer sizes
def test_focalmean_np_exec(self):
lo = FocalMean_np_exec(self.l1, buffersize=1)
res = np.asarray([[1]*4]*4)
self.assertTrue(allequal(lo._data, res))
lo = FocalMean_np_exec(self.l2, buffersize=3,decomposition=columndecomposition)
res = np.asarray([[2]*4]*4)
self.assertTrue(allequal(lo._data, res))
# To ensure FocalSum Operation gives the correct output with different buffer sizes
def test_focalsum(self):
lo = FocalSum(self.l1, buffersize=1)
res = np.asarray([[4,6,6,4],[6,9,9,6],[6,9,9,6],[4,6,6,4]])
self.assertTrue(allequal(lo._data, res))
import torch.utils.data as data
import os
import os.path
import numpy as np
from numpy.random import randint
import torch
from colorama import init
from colorama import Fore, Back, Style
init(autoreset=True)
class VideoRecord(object):
def __init__(self, row):
self._data = row
@property
def path(self):
return self._data[0]
@property
def num_frames(self):
return int(self._data[1])
@property
def label(self):
return int(self._data[2])
class TSNDataSet(data.Dataset):
def __init__(self, root_path, list_file, num_dataload,
num_segments=3, new_length=1, modality='RGB',
image_tmpl='img_{:05d}.t7', transform=None,
force_grayscale=False, random_shift=True, test_mode=False):
self.root_path = root_path
self.list_file = list_file
self.num_segments = num_segments
self.new_length = new_length
self.modality = modality
self.image_tmpl = image_tmpl
self.transform = transform
self.random_shift = random_shift
self.test_mode = test_mode
self.num_dataload = num_dataload
if self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus':
self.new_length += 1 # Diff needs one more image to calculate diff
self._parse_list() # read all the video files
def _load_feature(self, directory, idx):
if self.modality == 'RGB' or self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus':
feat_path = os.path.join(directory, self.image_tmpl.format(idx))
try:
feat = [torch.load(feat_path)]
except:
print(Back.RED + feat_path)
return feat
elif self.modality == 'Flow':
x_feat = torch.load(os.path.join(directory, self.image_tmpl.format('x', idx)))
y_feat = torch.load(os.path.join(directory, self.image_tmpl.format('y', idx)))
return [x_feat, y_feat]
def _parse_list(self):
self.video_list = [VideoRecord(x.strip().split(' ')) for x in open(self.list_file)]
# repeat the list if the length is less than num_dataload (especially for target data)
n_repeat = self.num_dataload//len(self.video_list)
n_left = self.num_dataload%len(self.video_list)
self.video_list = self.video_list*n_repeat + self.video_list[:n_left]
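# e.g. 5 source videos with num_dataload = 12 gives n_repeat = 2 and
# n_left = 2, so the list becomes 5*2 + 2 = 12 entries (numbers
# illustrative only).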
def _sample_indices(self, record):
"""
:param record: VideoRecord
:return: list
"""
#np.random.seed(1)
average_duration = (record.num_frames - self.new_length + 1) // self.num_segments
if average_duration > 0:
offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments)
elif record.num_frames > self.num_segments:
offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
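# Worked example of the sampling above (illustrative numbers): num_frames=30,
# new_length=1, num_segments=3 -> average_duration=10, so offsets start at
# [0, 10, 20] plus a uniform draw in [0, 10) per segment, returned 1-based.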
def _get_val_indices(self, record):
num_min = self.num_segments + self.new_length - 1
num_select = record.num_frames - self.new_length + 1
if record.num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)])
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
from operator import le
import os
import math
import warnings
warnings.filterwarnings('ignore', 'The iteration is not making good progress')
import numpy as np
np.set_printoptions(suppress=True)
import scipy
import scipy.stats
from scipy.stats import poisson, uniform, norm
from scipy.fftpack import fft, ifft
from scipy import optimize as opti
import scipy.special as special
from scipy.signal import convolve
from scipy.signal import savgol_filter
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import cm
from matplotlib import colors
from mpl_axes_aligner import align
import h5py
from scipy.interpolate import interp1d
from sklearn.linear_model import orthogonal_mp
from numba import njit
warnings.filterwarnings('ignore')
matplotlib.use('pgf')
plt.style.use('default')
plt.rcParams['savefig.dpi'] = 100
plt.rcParams['figure.dpi'] = 100
plt.rcParams['font.size'] = 20
plt.rcParams['lines.markersize'] = 4.0
plt.rcParams['lines.linewidth'] = 2.0
# plt.rcParams['mathtext.fontset'] = 'cm'
plt.rcParams['text.usetex'] = True
plt.rcParams['pgf.texsystem'] = 'pdflatex'
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['pgf.preamble'] = r'\usepackage[detect-all,locale=DE]{siunitx}'
nshannon = 1
window = 1029
gmu = 160.
gsigma = 40.
std = 1.
p = [8., 0.5, 24.]
Thres = {'mcmc':std / gsigma, 'xiaopeip':0, 'lucyddm':0.2, 'fbmp':0, 'fftrans':0.1, 'findpeak':0.1, 'threshold':0, 'firstthres':0, 'omp':0}
d_history = [('TriggerNo', np.uint32), ('ChannelID', np.uint32), ('step', np.uint32), ('loc', np.float32)]
proposal = np.array((1, 1, 2))
# -*- coding: utf-8 -*-
""" classes that implement the blocks for MDF versions 2 and 3
Edit history
Author : yda
Date : 2020-11-12
Package name changed - asammdf to mdfstudio
Functions
---------
* Channel.metadata - Get rid of b" text when decoding byte type data
* ChannelConversion.metadata - Get rid of b" text when decoding byte type data
* ChannelExtension.metadata - Get rid of b" text when decoding byte type data
* ChannelGroup.metadata - Get rid of b" text when decoding byte type data
"""
from datetime import datetime
from getpass import getuser
import logging
from struct import pack, unpack, unpack_from
import sys
from textwrap import wrap
from traceback import format_exc
from numexpr import evaluate
import numpy as np
from . import v2_v3_constants as v23c
from ..version import __version__
from .utils import get_fields, get_text_v3, MdfException, UINT16_u, UINT16_uf
SEEK_START = v23c.SEEK_START
SEEK_END = v23c.SEEK_END
CHANNEL_DISPLAYNAME_u = v23c.CHANNEL_DISPLAYNAME_u
CHANNEL_DISPLAYNAME_uf = v23c.CHANNEL_DISPLAYNAME_uf
CHANNEL_LONGNAME_u = v23c.CHANNEL_LONGNAME_u
CHANNEL_LONGNAME_uf = v23c.CHANNEL_LONGNAME_uf
CHANNEL_SHORT_u = v23c.CHANNEL_SHORT_u
CHANNEL_SHORT_uf = v23c.CHANNEL_SHORT_uf
COMMON_uf = v23c.COMMON_uf
COMMON_u = v23c.COMMON_u
CONVERSION_COMMON_SHORT_uf = v23c.CONVERSION_COMMON_SHORT_uf
SOURCE_COMMON_uf = v23c.SOURCE_COMMON_uf
SOURCE_EXTRA_ECU_uf = v23c.SOURCE_EXTRA_ECU_uf
SOURCE_EXTRA_VECTOR_uf = v23c.SOURCE_EXTRA_VECTOR_uf
SOURCE_COMMON_u = v23c.SOURCE_COMMON_u
SOURCE_EXTRA_ECU_u = v23c.SOURCE_EXTRA_ECU_u
SOURCE_EXTRA_VECTOR_u = v23c.SOURCE_EXTRA_VECTOR_u
logger = logging.getLogger("mdfstudio")
__all__ = [
"Channel",
"ChannelConversion",
"ChannelDependency",
"ChannelExtension",
"ChannelGroup",
"DataBlock",
"DataGroup",
"FileIdentificationBlock",
"HeaderBlock",
"ProgramBlock",
"TextBlock",
"TriggerBlock",
]
class Channel:
""" CNBLOCK class
If the `load_metadata` keyword argument is not provided or is False,
then the conversion, source and display name information is not processed.
CNBLOCK fields
* ``id`` - bytes : block ID; always b'CN'
* ``block_len`` - int : block bytes size
* ``next_ch_addr`` - int : next CNBLOCK address
* ``conversion_addr`` - int : address of channel conversion block
* ``source_addr`` - int : address of channel source block
* ``component_addr`` - int : address of dependency block (CDBLOCK) of this
channel
* ``comment_addr`` - int : address of TXBLOCK that contains the
channel comment
* ``channel_type`` - int : integer code for channel type
* ``short_name`` - bytes : short signal name
* ``description`` - bytes : signal description
* ``start_offset`` - int : start offset in bits to determine the first bit
of the signal in the data record
* ``bit_count`` - int : channel bit count
* ``data_type`` - int : integer code for channel data type
* ``range_flag`` - int : value range valid flag
* ``min_raw_value`` - float : min raw value of all samples
* ``max_raw_value`` - float : max raw value of all samples
* ``sampling_rate`` - float : sampling rate in *'s'* for a virtual time
channel
* ``long_name_addr`` - int : address of TXBLOCK that contains the channel's
name
* ``display_name_addr`` - int : address of TXBLOCK that contains the
channel's display name
* ``aditional_byte_offset`` - int : additional Byte offset of the channel
in the data recor
Other attributes
* ``address`` - int : block address inside mdf file
* ``comment`` - str : channel comment
* ``conversion`` - ChannelConversion : channel conversion; *None* if the channel has
no conversion
* ``display_name`` - str : channel display name
* ``name`` - str : full channel name
* ``source`` - SourceInformation : channel source information; *None* if the channel
has no source information
Parameters
----------
address : int
block address; to be used for objects created from file
stream : handle
file handle; to be used for objects created from file
load_metadata : bool
option to load conversion, source and display_name; default *True*
for dynamically created objects :
see the key-value pairs
Examples
--------
>>> with open('test.mdf', 'rb') as mdf:
... ch1 = Channel(stream=mdf, address=0xBA52)
>>> ch2 = Channel()
>>> ch1.name
'VehicleSpeed'
>>> ch1['id']
b'CN'
"""
__slots__ = (
"name",
"display_name",
"comment",
"conversion",
"source",
"address",
"id",
"block_len",
"next_ch_addr",
"conversion_addr",
"source_addr",
"component_addr",
"comment_addr",
"channel_type",
"short_name",
"description",
"start_offset",
"bit_count",
"data_type",
"range_flag",
"min_raw_value",
"max_raw_value",
"sampling_rate",
"long_name_addr",
"display_name_addr",
"additional_byte_offset",
)
def __init__(self, **kwargs):
super().__init__()
self.name = self.display_name = self.comment = ""
self.conversion = self.source = None
try:
stream = kwargs["stream"]
self.address = address = kwargs["address"]
mapped = kwargs.get("mapped", False)
if mapped:
(size,) = UINT16_uf(stream, address + 2)
if size == v23c.CN_DISPLAYNAME_BLOCK_SIZE:
(
self.id,
self.block_len,
self.next_ch_addr,
self.conversion_addr,
self.source_addr,
self.component_addr,
self.comment_addr,
self.channel_type,
self.short_name,
self.description,
self.start_offset,
self.bit_count,
self.data_type,
self.range_flag,
self.min_raw_value,
self.max_raw_value,
self.sampling_rate,
self.long_name_addr,
self.display_name_addr,
self.additional_byte_offset,
) = CHANNEL_DISPLAYNAME_uf(stream, address)
parsed_strings = kwargs["parsed_strings"]
if parsed_strings is None:
addr = self.long_name_addr
if addr:
self.name = get_text_v3(
address=addr, stream=stream, mapped=mapped
)
else:
self.name = self.short_name.decode("latin-1").strip(
" \t\n\r\0"
)
addr = self.display_name_addr
if addr:
self.display_name = get_text_v3(
address=addr, stream=stream, mapped=mapped
)
else:
self.name, self.display_name = parsed_strings
elif size == v23c.CN_LONGNAME_BLOCK_SIZE:
(
self.id,
self.block_len,
self.next_ch_addr,
self.conversion_addr,
self.source_addr,
self.component_addr,
self.comment_addr,
self.channel_type,
self.short_name,
self.description,
self.start_offset,
self.bit_count,
self.data_type,
self.range_flag,
self.min_raw_value,
self.max_raw_value,
self.sampling_rate,
self.long_name_addr,
) = CHANNEL_LONGNAME_uf(stream, address)
parsed_strings = kwargs["parsed_strings"]
if parsed_strings is None:
addr = self.long_name_addr
if addr:
self.name = get_text_v3(
address=addr, stream=stream, mapped=mapped
)
else:
self.name = self.short_name.decode("latin-1").strip(
" \t\n\r\0"
)
else:
self.name, self.display_name = parsed_strings
else:
(
self.id,
self.block_len,
self.next_ch_addr,
self.conversion_addr,
self.source_addr,
self.component_addr,
self.comment_addr,
self.channel_type,
self.short_name,
self.description,
self.start_offset,
self.bit_count,
self.data_type,
self.range_flag,
self.min_raw_value,
self.max_raw_value,
self.sampling_rate,
) = CHANNEL_SHORT_uf(stream, address)
self.name = self.short_name.decode("latin-1").strip(" \t\n\r\0")
cc_map = kwargs.get("cc_map", {})
si_map = kwargs.get("si_map", {})
address = self.conversion_addr
if address:
try:
if address in cc_map:
conv = cc_map[address]
else:
(size,) = UINT16_uf(stream, address + 2)
raw_bytes = stream[address : address + size]
if raw_bytes in cc_map:
conv = cc_map[raw_bytes]
else:
conv = ChannelConversion(
raw_bytes=raw_bytes,
stream=stream,
address=address,
mapped=mapped,
)
cc_map[raw_bytes] = cc_map[address] = conv
except:
logger.warning(
f"Channel conversion parsing error: {format_exc()}. The error is ignored and the channel conversion is None"
)
conv = None
self.conversion = conv
address = self.source_addr
if address:
try:
if address in si_map:
source = si_map[address]
else:
raw_bytes = stream[address : address + v23c.CE_BLOCK_SIZE]
if raw_bytes in si_map:
source = si_map[raw_bytes]
else:
source = ChannelExtension(
raw_bytes=raw_bytes,
stream=stream,
address=address,
mapped=mapped,
)
si_map[raw_bytes] = si_map[address] = source
except:
logger.warning(
f"Channel source parsing error: {format_exc()}. The error is ignored and the channel source is None"
)
source = None
self.source = source
self.comment = get_text_v3(
address=self.comment_addr, stream=stream, mapped=mapped
)
else:
stream.seek(address + 2)
(size,) = UINT16_u(stream.read(2))
stream.seek(address)
block = stream.read(size)
if size == v23c.CN_DISPLAYNAME_BLOCK_SIZE:
(
self.id,
self.block_len,
self.next_ch_addr,
self.conversion_addr,
self.source_addr,
self.component_addr,
self.comment_addr,
self.channel_type,
self.short_name,
self.description,
self.start_offset,
self.bit_count,
self.data_type,
self.range_flag,
self.min_raw_value,
self.max_raw_value,
self.sampling_rate,
self.long_name_addr,
self.display_name_addr,
self.additional_byte_offset,
) = CHANNEL_DISPLAYNAME_u(block)
parsed_strings = kwargs["parsed_strings"]
if parsed_strings is None:
addr = self.long_name_addr
if addr:
self.name = get_text_v3(
address=addr, stream=stream, mapped=mapped
)
else:
self.name = self.short_name.decode("latin-1").strip(
" \t\n\r\0"
)
addr = self.display_name_addr
if addr:
self.display_name = get_text_v3(
address=addr, stream=stream, mapped=mapped
)
else:
self.name, self.display_name = parsed_strings
elif size == v23c.CN_LONGNAME_BLOCK_SIZE:
(
self.id,
self.block_len,
self.next_ch_addr,
self.conversion_addr,
self.source_addr,
self.component_addr,
self.comment_addr,
self.channel_type,
self.short_name,
self.description,
self.start_offset,
self.bit_count,
self.data_type,
self.range_flag,
self.min_raw_value,
self.max_raw_value,
self.sampling_rate,
self.long_name_addr,
) = CHANNEL_LONGNAME_u(block)
parsed_strings = kwargs["parsed_strings"]
if parsed_strings is None:
addr = self.long_name_addr
if addr:
self.name = get_text_v3(
address=addr, stream=stream, mapped=mapped
)
else:
self.name = self.short_name.decode("latin-1").strip(
" \t\n\r\0"
)
else:
self.name, self.display_name = parsed_strings
else:
(
self.id,
self.block_len,
self.next_ch_addr,
self.conversion_addr,
self.source_addr,
self.component_addr,
self.comment_addr,
self.channel_type,
self.short_name,
self.description,
self.start_offset,
self.bit_count,
self.data_type,
self.range_flag,
self.min_raw_value,
self.max_raw_value,
self.sampling_rate,
) = CHANNEL_SHORT_u(block)
self.name = self.short_name.decode("latin-1").strip(" \t\n\r\0")
cc_map = kwargs.get("cc_map", {})
si_map = kwargs.get("si_map", {})
address = self.conversion_addr
if address:
try:
if address in cc_map:
conv = cc_map[address]
else:
stream.seek(address + 2)
(size,) = UINT16_u(stream.read(2))
stream.seek(address)
raw_bytes = stream.read(size)
if raw_bytes in cc_map:
conv = cc_map[raw_bytes]
else:
conv = ChannelConversion(
raw_bytes=raw_bytes,
stream=stream,
address=address,
mapped=mapped,
)
cc_map[raw_bytes] = cc_map[address] = conv
except:
logger.warning(
f"Channel conversion parsing error: {format_exc()}. The error is ignored and the channel conversion is None"
)
conv = None
self.conversion = conv
address = self.source_addr
if address:
try:
if address in si_map:
source = si_map[address]
else:
stream.seek(address)
raw_bytes = stream.read(v23c.CE_BLOCK_SIZE)
if raw_bytes in si_map:
source = si_map[raw_bytes]
else:
source = ChannelExtension(
raw_bytes=raw_bytes,
stream=stream,
address=address,
mapped=mapped,
)
si_map[raw_bytes] = si_map[address] = source
except:
logger.warning(
f"Channel source parsing error: {format_exc()}. The error is ignored and the channel source is None"
)
source = None
self.source = source
self.comment = get_text_v3(
address=self.comment_addr, stream=stream, mapped=mapped
)
if self.id != b"CN":
message = f'Expected "CN" block @{hex(address)} but found "{self.id}"'
logger.exception(message)
raise MdfException(message)
except KeyError:
self.address = 0
self.id = b"CN"
self.block_len = kwargs.get("block_len", v23c.CN_DISPLAYNAME_BLOCK_SIZE)
self.next_ch_addr = kwargs.get("next_ch_addr", 0)
self.conversion_addr = kwargs.get("conversion_addr", 0)
self.source_addr = kwargs.get("source_addr", 0)
self.component_addr = kwargs.get("component_addr", 0)
self.comment_addr = kwargs.get("comment_addr", 0)
self.channel_type = kwargs.get("channel_type", 0)
self.short_name = kwargs.get("short_name", (b"\0" * 32))
self.description = kwargs.get("description", (b"\0" * 128))
self.start_offset = kwargs.get("start_offset", 0)
self.bit_count = kwargs.get("bit_count", 8)
self.data_type = kwargs.get("data_type", 0)
self.range_flag = kwargs.get("range_flag", 1)
self.min_raw_value = kwargs.get("min_raw_value", 0)
self.max_raw_value = kwargs.get("max_raw_value", 0)
self.sampling_rate = kwargs.get("sampling_rate", 0)
if self.block_len >= v23c.CN_LONGNAME_BLOCK_SIZE:
self.long_name_addr = kwargs.get("long_name_addr", 0)
if self.block_len >= v23c.CN_DISPLAYNAME_BLOCK_SIZE:
self.display_name_addr = kwargs.get("display_name_addr", 0)
self.additional_byte_offset = kwargs.get("additional_byte_offset", 0)
def to_blocks(self, address, blocks, defined_texts, cc_map, si_map):
key = "long_name_addr"
text = self.name
if self.block_len >= v23c.CN_LONGNAME_BLOCK_SIZE:
if len(text) > 31:
if text in defined_texts:
self[key] = defined_texts[text]
else:
tx_block = TextBlock(text=text)
self[key] = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
else:
self[key] = 0
self.short_name = text.encode("latin-1", "backslashreplace")[:31]
key = "display_name_addr"
text = self.display_name
if self.block_len >= v23c.CN_DISPLAYNAME_BLOCK_SIZE:
if text:
if text in defined_texts:
self[key] = defined_texts[text]
else:
tx_block = TextBlock(text=text)
self[key] = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
else:
self[key] = 0
key = "comment_addr"
text = self.comment
if text:
if len(text) < 128:
self.description = text.encode("latin-1", "backslashreplace")[:127]
self[key] = 0
else:
if text in defined_texts:
self[key] = defined_texts[text]
else:
tx_block = TextBlock(text=text)
self[key] = address
defined_texts[text] = address
tx_block.address = address
address += tx_block.block_len
blocks.append(tx_block)
self.description = b"\0"
else:
self[key] = 0
conversion = self.conversion
if conversion:
address = conversion.to_blocks(address, blocks, defined_texts, cc_map)
self.conversion_addr = conversion.address
else:
self.conversion_addr = 0
source = self.source
if source:
address = source.to_blocks(address, blocks, defined_texts, si_map)
self.source_addr = source.address
else:
self.source_addr = 0
blocks.append(self)
self.address = address
address += self.block_len
return address
def metadata(self):
max_len = max(len(key) for key in self)
template = f"{{: <{max_len}}}: {{}}"
metadata = []
lines = f"""
name: {self.name}
display name: {self.display_name}
address: {hex(self.address)}
comment: {self.comment}
""".split(
"\n"
)
keys = (
"id",
"block_len",
"next_ch_addr",
"conversion_addr",
"source_addr",
"component_addr",
"comment_addr",
"channel_type",
"short_name",
"description",
"start_offset",
"bit_count",
"data_type",
"range_flag",
"min_raw_value",
"max_raw_value",
"sampling_rate",
"long_name_addr",
"display_name_addr",
"additional_byte_offset",
)
for key in keys:
if not hasattr(self, key):
continue
val = getattr(self, key)
if key.endswith("addr") or key.startswith("text_"):
lines.append(template.format(key, hex(val)))
elif isinstance(val, float):
lines.append(template.format(key, round(val, 6)))
else:
if isinstance(val, bytes):
try:
lines.append(template.format(key, val.decode()))
except:
lines.append(template.format(key, val.decode('latin-1').strip("\0")))
else:
lines.append(template.format(key, val))
if key == "data_type":
lines[-1] += f" = {v23c.DATA_TYPE_TO_STRING[self.data_type]}"
elif key == "channel_type":
lines[-1] += f" = {v23c.CHANNEL_TYPE_TO_STRING[self.channel_type]}"
for line in lines:
if not line:
metadata.append(line)
else:
for wrapped_line in wrap(line, width=120):
metadata.append(wrapped_line)
return "\n".join(metadata)
def __bytes__(self):
block_len = self.block_len
if block_len == v23c.CN_DISPLAYNAME_BLOCK_SIZE:
return v23c.CHANNEL_DISPLAYNAME_p(
self.id,
self.block_len,
self.next_ch_addr,
self.conversion_addr,
self.source_addr,
self.component_addr,
self.comment_addr,
self.channel_type,
self.short_name,
self.description,
self.start_offset,
self.bit_count,
self.data_type,
self.range_flag,
self.min_raw_value,
self.max_raw_value,
self.sampling_rate,
self.long_name_addr,
self.display_name_addr,
self.additional_byte_offset,
)
elif block_len == v23c.CN_LONGNAME_BLOCK_SIZE:
return v23c.CHANNEL_LONGNAME_p(
self.id,
self.block_len,
self.next_ch_addr,
self.conversion_addr,
self.source_addr,
self.component_addr,
self.comment_addr,
self.channel_type,
self.short_name,
self.description,
self.start_offset,
self.bit_count,
self.data_type,
self.range_flag,
self.min_raw_value,
self.max_raw_value,
self.sampling_rate,
self.long_name_addr,
)
else:
return v23c.CHANNEL_SHORT_p(
self.id,
self.block_len,
self.next_ch_addr,
self.conversion_addr,
self.source_addr,
self.component_addr,
self.comment_addr,
self.channel_type,
self.short_name,
self.description,
self.start_offset,
self.bit_count,
self.data_type,
self.range_flag,
self.min_raw_value,
self.max_raw_value,
self.sampling_rate,
)
def __getitem__(self, item):
return self.__getattribute__(item)
def __setitem__(self, item, value):
self.__setattr__(item, value)
def __contains__(self, item):
return hasattr(self, item)
def __iter__(self):
for attr in dir(self):
if attr[:2] + attr[-2:] == "____":
continue
try:
if callable(getattr(self, attr)):
continue
yield attr
except AttributeError:
continue
def __lt__(self, other):
self_start = self.start_offset
other_start = other.start_offset
try:
self_additional_offset = self.additional_byte_offset
if self_additional_offset:
self_start += 8 * self_additional_offset
other_additional_offset = other.additional_byte_offset
if other_additional_offset:
other_start += 8 * other_additional_offset
except AttributeError:
pass
if self_start < other_start:
result = 1
elif self_start == other_start:
if self.bit_count >= other.bit_count:
result = 1
else:
result = 0
else:
result = 0
return result
def __repr__(self):
fields = []
for attr in dir(self):
if attr[:2] + attr[-2:] == "____":
continue
try:
if callable(getattr(self, attr)):
continue
fields.append(f"{attr}:{getattr(self, attr)}")
except AttributeError:
continue
return f"Channel (name: {self.name}, display name: {self.display_name,}, comment: {self.comment}, address: {hex(self.address)}, fields: {fields})"
class _ChannelConversionBase:
__slots__ = (
"unit",
"unit_field",
"formula",
"formula_field",
"referenced_blocks",
"address",
"id",
"reserved0",
"block_len",
"comment_addr",
"inv_conv_addr",
"conversion_type",
"precision",
"flags",
"ref_param_nr",
"val_param_nr",
"min_phy_value",
"max_phy_value",
"a",
"b",
"P1",
"P2",
"P3",
"P4",
"P5",
"P6",
"P7",
)
class ChannelConversion(_ChannelConversionBase):
""" CCBLOCK class
*ChannelConversion* has the following common fields
* ``id`` - bytes : block ID; always b'CC'
* ``block_len`` - int : block bytes size
* ``range_flag`` - int : value range valid flag
* ``min_phy_value`` - float : min raw value of all samples
* ``max_phy_value`` - float : max raw value of all samples
* ``unit`` - bytes : physical unit
* ``conversion_type`` - int : integer code for conversion type
* ``ref_param_nr`` - int : number of referenced parameters
*ChannelConversion* has the following specific fields
* linear conversion
* ``a`` - float : factor
* ``b`` - float : offset
* ``CANapeHiddenExtra`` - bytes : sometimes CANape appends extra
information; not compliant with MDF specs
* algebraic conversion
* ``formula`` - bytes : equation as string
* polynomial or rational conversion
* ``P1`` to ``P6`` - float : parameters
* exponential or logarithmic conversion
* ``P1`` to ``P7`` - float : parameters
* tabular with or without interpolation (grouped by index)
* ``raw_<N>`` - int : N-th raw value (X axis)
* ``phys_<N>`` - float : N-th physical value (Y axis)
* text table conversion
* ``param_val_<N>`` - int : N-th raw value (X axis)
* ``text_<N>`` - N-th text physical value (Y axis)
* text range table conversion
* ``default_lower`` - float : default lower raw value
* ``default_upper`` - float : default upper raw value
* ``default_addr`` - int : address of default text physical value
* ``lower_<N>`` - float : N-th lower raw value
* ``upper_<N>`` - float : N-th upper raw value
* ``text_<N>`` - int : address of N-th text physical value
Other attributes
* ``address`` - int : block address inside mdf file
* ``formula`` - str : formula string in case of algebraic conversion
* ``referenced_blocks`` - list : list of CCBLOCK/TXBLOCK referenced by the conversion
* ``unit`` - str : physical unit
Parameters
----------
address : int
block address inside mdf file
raw_bytes : bytes
complete block read from disk
stream : file handle
mdf file handle
for dynamically created objects :
see the key-value pairs
Examples
--------
>>> with open('test.mdf', 'rb') as mdf:
... cc1 = ChannelConversion(stream=mdf, address=0xBA52)
>>> cc2 = ChannelConversion(conversion_type=0)
>>> cc1['b'], cc1['a']
0, 100.0
"""
def __init__(self, **kwargs):
super().__init__()
self.unit = self.formula = ""
self.referenced_blocks = {}
if "raw_bytes" in kwargs or "stream" in kwargs:
mapped = kwargs.get("mapped", False)
self.address = address = kwargs.get("address", 0)
try:
block = kwargs["raw_bytes"]
(self.id, self.block_len) = COMMON_uf(block)
size = self.block_len
block_size = len(block)
block = block[4:]
stream = kwargs["stream"]
except KeyError:
stream = kwargs["stream"]
if mapped:
(self.id, self.block_len) = COMMON_uf(stream, address)
block_size = size = self.block_len
block = stream[address + 4 : address + size]
else:
stream.seek(address)
block = stream.read(4)
(self.id, self.block_len) = COMMON_u(block)
block_size = size = self.block_len
block = stream.read(size - 4)
(
self.range_flag,
self.min_phy_value,
self.max_phy_value,
self.unit_field,
self.conversion_type,
self.ref_param_nr,
) = CONVERSION_COMMON_SHORT_uf(block)
self.unit = self.unit_field.decode("latin-1").strip(" \t\r\n\0")
conv_type = self.conversion_type
if conv_type == v23c.CONVERSION_TYPE_LINEAR:
(self.b, self.a) = unpack_from("<2d", block, v23c.CC_COMMON_SHORT_SIZE)
if size != v23c.CC_LIN_BLOCK_SIZE:
self.CANapeHiddenExtra = block[v23c.CC_LIN_BLOCK_SIZE - 4 :]
elif conv_type == v23c.CONVERSION_TYPE_NONE:
pass
elif conv_type == v23c.CONVERSION_TYPE_FORMULA:
self.formula_field = block[v23c.CC_COMMON_SHORT_SIZE :]
self.formula = (
self.formula_field.decode("latin-1")
.strip(" \t\r\n\0")
.replace("x", "X")
)
if "X1" not in self.formula:
self.formula = self.formula.replace("X", "X1")
elif conv_type in (v23c.CONVERSION_TYPE_TABI, v23c.CONVERSION_TYPE_TAB):
nr = self.ref_param_nr
size = v23c.CC_COMMON_BLOCK_SIZE + nr * 16
if block_size == v23c.MAX_UINT16:
stream.seek(address)
raw_bytes = stream.read(size)
conversion = ChannelConversion(
raw_bytes=raw_bytes, stream=stream, address=address
)
conversion.block_len = size
self.update(conversion)
self.referenced_blocks = conversion.referenced_blocks
else:
values = unpack_from(f"<{2*nr}d", block, v23c.CC_COMMON_SHORT_SIZE)
for i in range(nr):
(self[f"raw_{i}"], self[f"phys_{i}"]) = (
values[i * 2],
values[2 * i + 1],
)
elif conv_type in (v23c.CONVERSION_TYPE_POLY, v23c.CONVERSION_TYPE_RAT):
(self.P1, self.P2, self.P3, self.P4, self.P5, self.P6) = unpack_from(
"<6d", block, v23c.CC_COMMON_SHORT_SIZE
)
elif conv_type in (v23c.CONVERSION_TYPE_EXPO, v23c.CONVERSION_TYPE_LOGH):
(
self.P1,
self.P2,
self.P3,
self.P4,
self.P5,
self.P6,
self.P7,
) = unpack_from("<7d", block, v23c.CC_COMMON_SHORT_SIZE)
elif conv_type == v23c.CONVERSION_TYPE_TABX:
nr = self.ref_param_nr
size = v23c.CC_COMMON_BLOCK_SIZE + nr * 40
if block_size == v23c.MAX_UINT16:
stream.seek(address)
raw_bytes = stream.read(size)
conversion = ChannelConversion(
raw_bytes=raw_bytes, stream=stream, address=address
)
conversion.block_len = size
for attr in get_fields(conversion):
setattr(self, attr, getattr(conversion, attr))
self.referenced_blocks = conversion.referenced_blocks
else:
values = unpack_from(
"<" + "d32s" * nr, block, v23c.CC_COMMON_SHORT_SIZE
)
for i in range(nr):
(self[f"param_val_{i}"], self[f"text_{i}"]) = (
values[i * 2],
values[2 * i + 1],
)
elif conv_type == v23c.CONVERSION_TYPE_RTABX:
nr = self.ref_param_nr - 1
size = v23c.CC_COMMON_BLOCK_SIZE + (nr + 1) * 20
if block_size == v23c.MAX_UINT16:
stream.seek(address)
raw_bytes = stream.read(size)
conversion = ChannelConversion(
raw_bytes=raw_bytes, stream=stream, address=address
)
conversion.block_len = size
for attr in get_fields(conversion):
setattr(self, attr, getattr(conversion, attr))
self.referenced_blocks = conversion.referenced_blocks
else:
(
self.default_lower,
self.default_upper,
self.default_addr,
) = unpack_from("<2dI", block, v23c.CC_COMMON_SHORT_SIZE)
if self.default_addr:
self.referenced_blocks["default_addr"] = get_text_v3(
address=self.default_addr,
stream=stream,
mapped=mapped,
decode=False,
)
else:
self.referenced_blocks["default_addr"] = b""
values = unpack_from(
"<" + "2dI" * nr, block, v23c.CC_COMMON_SHORT_SIZE + 20
)
for i in range(nr):
(self[f"lower_{i}"], self[f"upper_{i}"], self[f"text_{i}"]) = (
values[i * 3],
values[3 * i + 1],
values[3 * i + 2],
)
if values[3 * i + 2]:
block = get_text_v3(
address=values[3 * i + 2],
stream=stream,
mapped=mapped,
decode=False,
)
self.referenced_blocks[f"text_{i}"] = block
else:
self.referenced_blocks[f"text_{i}"] = b""
if self.id != b"CC":
message = f'Expected "CC" block @{hex(address)} but found "{self.id}"'
logger.exception(message)
raise MdfException(message)
else:
self.address = 0
self.id = b"CC"
self.unit_field = kwargs.get("unit", b"")
if kwargs["conversion_type"] == v23c.CONVERSION_TYPE_NONE:
self.block_len = v23c.CC_COMMON_BLOCK_SIZE
self.range_flag = kwargs.get("range_flag", 1)
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
self.unit_field = kwargs.get("unit", ("\0" * 20).encode("latin-1"))
self.conversion_type = v23c.CONVERSION_TYPE_NONE
self.ref_param_nr = 0
elif kwargs["conversion_type"] == v23c.CONVERSION_TYPE_LINEAR:
self.block_len = v23c.CC_LIN_BLOCK_SIZE
self.range_flag = kwargs.get("range_flag", 1)
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
self.unit_field = kwargs.get("unit", ("\0" * 20).encode("latin-1"))
self.conversion_type = v23c.CONVERSION_TYPE_LINEAR
self.ref_param_nr = 2
self.b = kwargs.get("b", 0)
self.a = kwargs.get("a", 1)
if self.block_len != v23c.CC_LIN_BLOCK_SIZE:
self.CANapeHiddenExtra = kwargs["CANapeHiddenExtra"]
elif kwargs["conversion_type"] in (
v23c.CONVERSION_TYPE_POLY,
v23c.CONVERSION_TYPE_RAT,
):
self.block_len = v23c.CC_POLY_BLOCK_SIZE
self.range_flag = kwargs.get("range_flag", 1)
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
self.unit_field = kwargs.get("unit", ("\0" * 20).encode("latin-1"))
self.conversion_type = kwargs["conversion_type"]
self.ref_param_nr = 6
self.P1 = kwargs.get("P1", 0)
self.P2 = kwargs.get("P2", 0)
self.P3 = kwargs.get("P3", 0)
self.P4 = kwargs.get("P4", 0)
self.P5 = kwargs.get("P5", 0)
self.P6 = kwargs.get("P6", 0)
elif kwargs["conversion_type"] in (
v23c.CONVERSION_TYPE_EXPO,
v23c.CONVERSION_TYPE_LOGH,
):
self.block_len = v23c.CC_EXPO_BLOCK_SIZE
self.range_flag = kwargs.get("range_flag", 1)
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
self.unit_field = kwargs.get("unit", ("\0" * 20).encode("latin-1"))
self.conversion_type = v23c.CONVERSION_TYPE_EXPO
self.ref_param_nr = 7
self.P1 = kwargs.get("P1", 0)
self.P2 = kwargs.get("P2", 0)
self.P3 = kwargs.get("P3", 0)
self.P4 = kwargs.get("P4", 0)
self.P5 = kwargs.get("P5", 0)
self.P6 = kwargs.get("P6", 0)
self.P7 = kwargs.get("P7", 0)
elif kwargs["conversion_type"] == v23c.CONVERSION_TYPE_FORMULA:
formula = kwargs["formula"]
formula_len = len(formula)
try:
self.formula = formula.decode("latin-1")
formula += b"\0"
except AttributeError:
self.formula = formula
formula = formula.encode("latin-1") + b"\0"
self.block_len = 46 + formula_len + 1
self.range_flag = kwargs.get("range_flag", 1)
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
self.unit_field = kwargs.get("unit", ("\0" * 20).encode("latin-1"))
self.conversion_type = v23c.CONVERSION_TYPE_FORMULA
self.ref_param_nr = formula_len
self.formula_field = formula
elif kwargs["conversion_type"] in (
v23c.CONVERSION_TYPE_TABI,
v23c.CONVERSION_TYPE_TAB,
):
nr = kwargs["ref_param_nr"]
self.block_len = v23c.CC_COMMON_BLOCK_SIZE + nr * 2 * 8
self.range_flag = kwargs.get("range_flag", 1)
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
self.unit_field = kwargs.get("unit", ("\0" * 20).encode("latin-1"))
self.conversion_type = kwargs["conversion_type"]
self.ref_param_nr = nr
for i in range(nr):
self[f"raw_{i}"] = kwargs[f"raw_{i}"]
self[f"phys_{i}"] = kwargs[f"phys_{i}"]
elif kwargs["conversion_type"] == v23c.CONVERSION_TYPE_TABX:
nr = kwargs["ref_param_nr"]
self.block_len = v23c.CC_COMMON_BLOCK_SIZE + 40 * nr
self.range_flag = kwargs.get("range_flag", 0)
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
self.unit_field = kwargs.get("unit", ("\0" * 20).encode("latin-1"))
self.conversion_type = v23c.CONVERSION_TYPE_TABX
self.ref_param_nr = nr
for i in range(nr):
self[f"param_val_{i}"] = kwargs[f"param_val_{i}"]
self[f"text_{i}"] = kwargs[f"text_{i}"]
elif kwargs["conversion_type"] == v23c.CONVERSION_TYPE_RTABX:
nr = kwargs["ref_param_nr"]
self.block_len = v23c.CC_COMMON_BLOCK_SIZE + 20 * nr
self.range_flag = kwargs.get("range_flag", 0)
self.min_phy_value = kwargs.get("min_phy_value", 0)
self.max_phy_value = kwargs.get("max_phy_value", 0)
self.unit_field = kwargs.get("unit", ("\0" * 20).encode("latin-1"))
self.conversion_type = v23c.CONVERSION_TYPE_RTABX
self.ref_param_nr = nr
self.default_lower = 0
self.default_upper = 0
self.default_addr = 0
key = "default_addr"
if key in kwargs:
self.referenced_blocks[key] = kwargs[key]
else:
self.referenced_blocks[key] = b""
for i in range(nr - 1):
self[f"lower_{i}"] = kwargs[f"lower_{i}"]
self[f"upper_{i}"] = kwargs[f"upper_{i}"]
key = f"text_{i}"
self[key] = 0
self.referenced_blocks[key] = kwargs[key]
else:
message = (
f'Conversion type "{kwargs["conversion_type"]}" not implemented'
)
logger.exception(message)
raise MdfException(message)
def to_blocks(self, address, blocks, defined_texts, cc_map):
self.unit_field = self.unit.encode("latin-1", "ignore")[:19]
if self.conversion_type == v23c.CONVERSION_TYPE_FORMULA:
formula = self.formula
if not formula.endswith("\0"):
formula += "\0"
self.formula_field = formula.encode("latin-1")
self.block_len = v23c.CC_COMMON_BLOCK_SIZE + len(self.formula_field)
for key, block in self.referenced_blocks.items():
if block:
if isinstance(block, ChannelConversion):
address = block.to_blocks(address, blocks, defined_texts, cc_map)
self[key] = block.address
else:
text = block
if text in defined_texts:
self[key] = defined_texts[text]
else:
block = TextBlock(text=text)
defined_texts[text] = address
blocks.append(block)
self[key] = address
address += block.block_len
else:
self[key] = 0
bts = bytes(self)
if bts in cc_map:
self.address = cc_map[bts]
else:
blocks.append(bts)
self.address = address
cc_map[bts] = address
size = self.block_len
address += len(bts)
return address
def metadata(self, indent=""):
conv = self.conversion_type
if conv == v23c.CONVERSION_TYPE_NONE:
keys = v23c.KEYS_CONVERSION_NONE
elif conv == v23c.CONVERSION_TYPE_FORMULA:
keys = v23c.KEYS_CONVERSION_FORMULA
elif conv == v23c.CONVERSION_TYPE_LINEAR:
keys = v23c.KEYS_CONVERSION_LINEAR
if self.block_len != v23c.CC_LIN_BLOCK_SIZE:
keys += ("CANapeHiddenExtra",)
elif conv in (v23c.CONVERSION_TYPE_POLY, v23c.CONVERSION_TYPE_RAT):
keys = v23c.KEYS_CONVERSION_POLY_RAT
elif conv in (v23c.CONVERSION_TYPE_EXPO, v23c.CONVERSION_TYPE_LOGH):
keys = v23c.KEYS_CONVERSION_EXPO_LOGH
elif conv in (v23c.CONVERSION_TYPE_TABI, v23c.CONVERSION_TYPE_TAB):
nr = self.ref_param_nr
keys = list(v23c.KEYS_CONVERSION_NONE)
for i in range(nr):
keys.append(f"raw_{i}")
keys.append(f"phys_{i}")
elif conv == v23c.CONVERSION_TYPE_RTABX:
nr = self.ref_param_nr
keys = list(v23c.KEYS_CONVERSION_NONE)
keys += ["default_lower", "default_upper", "default_addr"]
for i in range(nr - 1):
keys.append(f"lower_{i}")
keys.append(f"upper_{i}")
keys.append(f"text_{i}")
elif conv == v23c.CONVERSION_TYPE_TABX:
nr = self.ref_param_nr
keys = list(v23c.KEYS_CONVERSION_NONE)
for i in range(nr):
keys.append(f"param_val_{i}")
keys.append(f"text_{i}")
max_len = max(len(key) for key in keys)
template = f"{{: <{max_len}}}: {{}}"
metadata = []
lines = f"""
address: {hex(self.address)}
""".split(
"\n"
)
for key in keys:
val = getattr(self, key)
if key.endswith("addr") or key.startswith("text_") and isinstance(val, int):
lines.append(template.format(key, hex(val)))
elif isinstance(val, float):
lines.append(template.format(key, round(val, 6)))
else:
if isinstance(val, bytes):
try:
lines.append(template.format(key, val.decode()))
except:
lines.append(template.format(key, val.decode('latin-1').strip("\0")))
else:
lines.append(template.format(key, val))
if key == "conversion_type":
lines[
-1
] += f" [{v23c.CONVERSION_TYPE_TO_STRING[self.conversion_type]}]"
elif self.referenced_blocks and key in self.referenced_blocks:
val = self.referenced_blocks[key]
if isinstance(val, bytes):
lines[-1] += f" (= {str(val)[1:]})"
else:
lines[-1] += f" (= CCBLOCK @ {hex(val.address)})"
if self.referenced_blocks:
max_len = max(len(key) for key in self.referenced_blocks)
template = f"{{: <{max_len}}}: {{}}"
lines.append("")
lines.append("Referenced blocks:")
for key, block in self.referenced_blocks.items():
if isinstance(block, ChannelConversion):
lines.append(template.format(key, ""))
lines.extend(block.metadata(indent + " ").split("\n"))
else:
lines.append(template.format(key, str(block)[1:]))
for line in lines:
if not line:
metadata.append(line)
else:
for wrapped_line in wrap(
line, initial_indent=indent, subsequent_indent=indent, width=120
):
metadata.append(wrapped_line)
return "\n".join(metadata)
def convert(self, values):
conversion_type = self.conversion_type
if conversion_type == v23c.CONVERSION_TYPE_NONE:
pass
elif conversion_type == v23c.CONVERSION_TYPE_LINEAR:
a = self.a
b = self.b
if (a, b) != (1, 0):
values = values * a
if b:
values += b
elif conversion_type in (v23c.CONVERSION_TYPE_TABI, v23c.CONVERSION_TYPE_TAB):
nr = self.ref_param_nr
raw_vals = [self[f"raw_{i}"] for i in range(nr)]
raw_vals = np.array(raw_vals)
phys = [self[f"phys_{i}"] for i in range(nr)]
phys = np.array(phys)
if conversion_type == v23c.CONVERSION_TYPE_TABI:
values = np.interp(values, raw_vals, phys)
else:
dim = raw_vals.shape[0]
inds = np.searchsorted(raw_vals, values)
inds[inds >= dim] = dim - 1
inds2 = inds - 1
inds2[inds2 < 0] = 0
cond = np.abs(values - raw_vals[inds]) >= np.abs(
values - raw_vals[inds2]
)
values = np.where(cond, phys[inds2], phys[inds])
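# Worked example of the nearest-neighbour branch above (illustrative):
# raw = [0, 10, 20], phys = [a, b, c]; for an input value of 14, searchsorted
# gives inds=2 and inds2=1, and |14-20| >= |14-10| holds, so phys[inds2] = b
# is chosen (ties go to the lower raw value).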
elif conversion_type == v23c.CONVERSION_TYPE_TABX:
nr = self.ref_param_nr
raw_vals = [self[f"param_val_{i}"] for i in range(nr)]
raw_vals = np.array(raw_vals)
phys = [self[f"text_{i}"] for i in range(nr)]
phys = np.array(phys)
x = sorted(zip(raw_vals, phys))
raw_vals = np.array([e[0] for e in x], dtype="<i8")
phys = np.array([e[1] for e in x])
default = b""
idx1 = np.searchsorted(raw_vals, values, side="right") - 1
idx2 = np.searchsorted(raw_vals, values, side="left")
idx = np.argwhere(idx1 != idx2)
from typing import List, Tuple
import numpy as np
import tensorflow as tf
CHANNELS = 1
CLASSES = 2
def _create_img_mask(nx: int, ny: int,
ncircles: int = 10,
radius_range: Tuple[int, int] = (3, 10),
border: int = 32,
sigma: int = 20) -> Tuple[np.array, np.array]:
img = np.ones((nx, ny, 1))
mask = np.zeros((nx, ny), dtype=bool)  # the np.bool alias was removed in NumPy 1.24
for _ in range(ncircles):
a = np.random.randint(border, nx - border)
b = np.random.randint(border, ny - border)
r = np.random.randint(*radius_range)
h = np.random.randint(1, 255)
y, x = np.ogrid[-a:nx - a, -b: ny - b]
m = x ** 2 + y ** 2 <= r**2
mask = np.logical_or(mask, m)
img[m] = h
img += np.random.normal(scale=sigma, size=img.shape)
img -= np.amin(img)
img /= np.amax(img)
return img, mask
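# Minimal usage sketch (hypothetical sizes; noise and radius parameters keep
# their defaults):
if __name__ == "__main__":
    demo_img, demo_mask = _create_img_mask(128, 128, ncircles=5)
    print(demo_img.shape, demo_mask.shape)  # (128, 128, 1) (128, 128)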
def _create_samples(N: int, nx: int, ny: int,
**kwargs) -> Tuple[np.array, np.array]:
imgs = np.empty((N, nx, ny, 1))
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import os
import tensorflow as tf
from utils.data import load_data
from utils.data import one_hot_encode
BATCH_SIZE = "batch_size"
CELL_SIZE = "cell_size"
def predict(dataset, model, model_path, **kwargs):
"""Returns classification for given data
:param dataset: The dataset to be classified using the trained model.
:param model: The model name to be used for classification.
:param model_path: The path where the trained model is saved.
:return:
"""
init_op = tf.group(
tf.local_variables_initializer(), tf.global_variables_initializer()
)
accuracy_tensor_name = "accuracy/accuracy/Mean:0"
prediction_tensor_name = "accuracy/predicted_class:0"
assert BATCH_SIZE in kwargs, "KeyNotFound : {}".format(BATCH_SIZE)
assert (
type(kwargs[BATCH_SIZE]) is int
), "Expected data type : int, but {} is {}".format(
kwargs[BATCH_SIZE], type(kwargs[BATCH_SIZE])
)
if model == 1:
# CNN-SVM
feed_dict = {"x_input:0": None, "y_input:0": None, "p_keep:0": 1.0}
accuracy_tensor_name = "metrics/accuracy/Mean:0"
prediction_tensor_name = "metrics/predicted_class:0"
elif model == 2:
# GRU-SVM
assert CELL_SIZE in kwargs, "KeyNotFound : {}".format(CELL_SIZE)
assert (
type(kwargs[CELL_SIZE]) is int
), "Expected data type : int, but {} is {}".format(
kwargs[CELL_SIZE], type(kwargs[CELL_SIZE])
)
initial_state = np.zeros([kwargs[BATCH_SIZE], kwargs[CELL_SIZE] * 5])
initial_state = initial_state.astype(np.float32)
feed_dict = {
"input/x_input:0": None,
"input/y_input:0": None,
"initial_state:0": initial_state,
"p_keep:0": 1.0,
}
elif model == 3:
# MLP-SVM
feed_dict = {"input/x_input:0": None, "input/y_input:0": None}
predictions_array = np.array([])
import numpy as np
import numpy.testing as npt
from dipy.data import get_data
from dipy.sims.voxel import add_noise
from dipy.segment.mrf import (ConstantObservationModel,
IteratedConditionalModes)
from dipy.segment.tissue import (TissueClassifierHMRF)
# Load a coronal slice from a T1-weighted MRI
fname = get_data('t1_coronal_slice')
single_slice = np.load(fname)
# Stack a few copies to form a 3D volume
nslices = 5
image = np.zeros(shape=single_slice.shape + (nslices,))
image[..., :nslices] = single_slice[..., None]
# Set up parameters
nclasses = 4
beta = np.float64(0.0)
max_iter = 10
background_noise = True
# Making squares
square = np.zeros((256, 256, 3), dtype=np.int16)
square[42:213, 42:213, :] = 1
square[71:185, 71:185, :] = 2
square[99:157, 99:157, :] = 3
square_gauss = np.zeros((256, 256, 3)) + 0.001
square_gauss = add_noise(square_gauss, 10000, 1, noise_type='gaussian')
square_gauss[42:213, 42:213, :] = 1
noise_1 = np.random.normal(1.001, 0.0001,
size=square_gauss[42:213, 42:213, :].shape)
square_gauss[42:213, 42:213, :] = square_gauss[42:213, 42:213, :] + noise_1
square_gauss[71:185, 71:185, :] = 2
noise_2 = np.random.normal(2.001, 0.0001,
size=square_gauss[71:185, 71:185, :].shape)
square_gauss[71:185, 71:185, :] = square_gauss[71:185, 71:185, :] + noise_2
square_gauss[99:157, 99:157, :] = 3
noise_3 = np.random.normal(3.001, 0.0001,
size=square_gauss[99:157, 99:157, :].shape)
square_gauss[99:157, 99:157, :] = square_gauss[99:157, 99:157, :] + noise_3
square_1 = np.zeros((256, 256, 3)) + 0.001
square_1 = add_noise(square_1, 10000, 1, noise_type='gaussian')
temp_1 = np.random.randint(1, 21, size=(171, 171, 3))  # random_integers (inclusive) is deprecated; randint uses an exclusive upper bound
temp_1 = np.where(temp_1 < 20, 1, 3)
square_1[42:213, 42:213, :] = temp_1
temp_2 = np.random.randint(1, 21, size=(114, 114, 3))
temp_2 = np.where(temp_2 < 19, 2, 1)
square_1[71:185, 71:185, :] = temp_2
temp_3 = np.random.randint(1, 21, size=(58, 58, 3))
temp_3 = np.where(temp_3 < 20, 3, 1)
square_1[99:157, 99:157, :] = temp_3
def test_greyscale_image():
com = ConstantObservationModel()
icm = IteratedConditionalModes()
mu, sigma = com.initialize_param_uniform(image, nclasses)
sigmasq = sigma ** 2
npt.assert_array_almost_equal(mu, np.array([0., 0.25, 0.5, 0.75]))
npt.assert_array_almost_equal(sigma, np.array([1.0, 1.0, 1.0, 1.0]))
npt.assert_array_almost_equal(sigmasq, np.array([1.0, 1.0, 1.0, 1.0]))
neglogl = com.negloglikelihood(image, mu, sigmasq, nclasses)
npt.assert_(neglogl[100, 100, 1, 0] != neglogl[100, 100, 1, 1])
npt.assert_(neglogl[100, 100, 1, 1] != neglogl[100, 100, 1, 2])
npt.assert_(neglogl[100, 100, 1, 2] != neglogl[100, 100, 1, 3])
npt.assert_(neglogl[100, 100, 1, 1] != neglogl[100, 100, 1, 3])
initial_segmentation = icm.initialize_maximum_likelihood(neglogl)
npt.assert_(initial_segmentation.max() == nclasses - 1)
npt.assert_(initial_segmentation.min() == 0)
PLN = icm.prob_neighborhood(initial_segmentation, beta, nclasses)
print(PLN.shape)
npt.assert_(np.all((PLN >= 0) & (PLN <= 1.0)))
if beta == 0.0:
npt.assert_almost_equal(PLN[50, 50, 1, 0], 0.25, True)
npt.assert_almost_equal(PLN[50, 50, 1, 1], 0.25, True)
npt.assert_almost_equal(PLN[50, 50, 1, 2], 0.25, True)
npt.assert_almost_equal(PLN[50, 50, 1, 3], 0.25, True)
npt.assert_almost_equal(PLN[147, 129, 1, 0], 0.25, True)
npt.assert_almost_equal(PLN[147, 129, 1, 1], 0.25, True)
npt.assert_almost_equal(PLN[147, 129, 1, 2], 0.25, True)
npt.assert_almost_equal(PLN[147, 129, 1, 3], 0.25, True)
npt.assert_almost_equal(PLN[61, 152, 1, 0], 0.25, True)
npt.assert_almost_equal(PLN[61, 152, 1, 1], 0.25, True)
npt.assert_almost_equal(PLN[61, 152, 1, 2], 0.25, True)
npt.assert_almost_equal(PLN[61, 152, 1, 3], 0.25, True)
npt.assert_almost_equal(PLN[100, 100, 1, 0], 0.25, True)
npt.assert_almost_equal(PLN[100, 100, 1, 1], 0.25, True)
npt.assert_almost_equal(PLN[100, 100, 1, 2], 0.25, True)
npt.assert_almost_equal(PLN[100, 100, 1, 3], 0.25, True)
import os
import numpy as np
import matplotlib.pyplot as plt
import math
import sys
import cv2
import torch
from PIL import Image
# import model
sys.path.append(".")
import pclpyd as pcl
# import pyzed.sl as sl
sys.path.append("..")
import pclpyd as pcl
def convertply2numpy(src):
    res = np.zeros([src['vertex'].data['x'].shape[0], 6])
    for i in range(src['vertex'].data['x'].shape[0]):
        res[i, 0] = src['vertex'].data['x'][i]
        res[i, 1] = src['vertex'].data['y'][i]
        res[i, 2] = src['vertex'].data['z'][i]
        res[i, 3] = src['vertex'].data['red'][i]
        res[i, 4] = src['vertex'].data['green'][i]
        res[i, 5] = src['vertex'].data['blue'][i]
    return res
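# Illustrative usage sketch (added; not from the original file). Assumes the
# `plyfile` package and a hypothetical 'cloud.ply' whose vertex element has
# x/y/z/red/green/blue properties, matching what convertply2numpy reads:
#   from plyfile import PlyData
#   xyzrgb = convertply2numpy(PlyData.read('cloud.ply'))  # -> (N, 6) array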
class CVis:
def __init__(self,
                 xyzrelist=[1,-1,1],ballradius=2,grab_pcl_func=None,
cmap = plt.cm.cool(np.linspace(0,1,50)),background=(0, 0, 0),
cvh=128*6,cvw=128*9,cvz=300*3,showrot=False,magnifyBlue=0,
freezerot=False,normalizecolor=True,waittime=0,_3d=True,
arg_set_func=None,
switch_model_func=None,
switch_dataset_func=None,
switch_pcl_func=None,):
        self.set(xyzrelist,ballradius,grab_pcl_func,cmap,background,
cvh,cvw,cvz,showrot,magnifyBlue,freezerot,normalizecolor,waittime,_3d,
arg_set_func,switch_model_func,switch_dataset_func,switch_pcl_func,)
self.c_gt=None
self.c_pred=None
self.rgb=None
self.mousex = 0.5
self.mousey = 0.5
self.zoom = 1.0
self.changed = True
cv2.namedWindow('show3d')
cv2.moveWindow('show3d', 0, 0)
cv2.setMouseCallback('show3d', self.onmouse)
pass
def set(self,
            xyzrelist=None,ballradius=None,grab_pcl_func=None,
cmap = None,background=None,
cvh=None,cvw=None,cvz=None,showrot=None,magnifyBlue=None,
freezerot=None,normalizecolor=None,waittime=None,_3d=None,
arg_set_func=None,
switch_model_func=None,
switch_dataset_func=None,
switch_pcl_func=None,):
if arg_set_func is not None:
arg_set_func(self)
else:
if cvh is not None :
self.cvh = cvh
if cvw is not None :
self.cvw = cvw
if cvz is not None :
self.cvz = cvz
if freezerot is not None:
self.freezerot=freezerot
if normalizecolor is not None:
self.normalizecolor=normalizecolor
if waittime is not None:
self.waittime=waittime
if showrot is not None:
self.showrot=showrot
if magnifyBlue is not None:
self.magnifyBlue=magnifyBlue
if ballradius is not None :
self.ballradius = ballradius
if background is not None :
self.background=background
            if grab_pcl_func is not None :
                self.grab_pcl_func = grab_pcl_func
if switch_model_func is not None:
self.switch_model_func=switch_model_func
if switch_dataset_func is not None:
self.switch_dataset_func=switch_dataset_func
if switch_pcl_func is not None:
self.switch_pcl_func=switch_pcl_func
if xyzrelist is not None :
self.set_xyz_reverse(xyzrelist)
if cmap is not None:
self.cmap = cmap
if _3d is not None:
self._3d = _3d
def onmouse(self,*args):
# global mousex, mousey, changed
x = args[1]
y = args[2]
self.mousex = x / float(self.cvw)
self.mousey = y / float(self.cvh)
self.changed = True
def resample_depth(self, img, w, h, typed='float'):
imgd = Image.fromarray(img.astype(typed))
        imgd = np.array(imgd.resize((w, h), Image.LANCZOS))  # ANTIALIAS was removed in Pillow 10
return imgd
def mapcolor(self,npoints,c_gt=None):
if self.rgb is None:
self.rgb = np.zeros((npoints,3), dtype='float32') + 255
if c_gt is not None:
# self.c1 = np.zeros((len(self.pointcloud),), dtype='float32') + 255
# self.c2 = np.zeros((len(self.pointcloud),), dtype='float32') + 255
self.rgb = np.zeros((c_gt.shape[0],3), dtype='float32') + 255
self.rgb[:,0] = c_gt[:, 0]
self.rgb[:,1] = c_gt[:, 1]
self.rgb[:,2] = c_gt[:, 2]
else:
self.rgb = np.zeros((npoints,3), dtype='float32') + 255
if self.normalizecolor:
self.rgb[:,0] /= (self.rgb[:,0].max() + 1e-14) / 255.0
self.rgb[:,1] /= (self.rgb[:,1].max() + 1e-14) / 255.0
self.rgb[:,2] /= (self.rgb[:,2].max() + 1e-14) / 255.0
self.rgb[:,0] = np.require(self.rgb[:,0], 'float32', 'C')
self.rgb[:,1] = np.require(self.rgb[:,1], 'float32', 'C')
self.rgb[:,2] = np.require(self.rgb[:,2], 'float32', 'C')
def render(self):
if self.rgb is None:
return
        rotmat = np.eye(3)
import numpy as np
import brainscore
from brainio.assemblies import DataAssembly
from brainscore.benchmarks._properties_common import PropertiesBenchmark, _assert_texture_activations
from brainscore.benchmarks._properties_common import calc_texture_modulation, calc_sparseness, calc_variance_ratio
from brainscore.metrics.ceiling import NeuronalPropertyCeiling
from brainscore.metrics.distribution_similarity import BootstrapDistributionSimilarity, ks_similarity
from result_caching import store
ASSEMBLY_NAME = 'movshon.FreemanZiemba2013_V1_properties'
REGION = 'V1'
TIMEBINS = [(70, 170)]
PARENT_TEXTURE_MODULATION = 'V1-texture_modulation'
PARENT_SELECTIVITY = 'V1-response_selectivity'
PARENT_MAGNITUDE = 'V1-response_magnitude'
PROPERTY_NAMES = ['texture_modulation_index', 'absolute_texture_modulation_index', 'texture_selectivity',
'noise_selectivity', 'texture_sparseness', 'noise_sparseness', 'variance_ratio', 'sample_variance',
'family_variance', 'max_texture', 'max_noise']
BIBTEX = """@article{Freeman2013,
author = {Freeman, Jeremy and Ziemba, Corey M. and Heeger, David J. and Simoncelli, Eero P. and Movshon, J. Anthony},
doi = {10.1038/nn.3402},
issn = {10976256},
journal = {Nature Neuroscience},
number = {7},
pages = {974--981},
pmid = {23685719},
publisher = {Nature Publishing Group},
title = {{A functional and perceptual signature of the second visual area in primates}},
url = {http://dx.doi.org/10.1038/nn.3402},
volume = {16},
year = {2013}
}
"""
RESPONSE_THRESHOLD = 5
def _MarquesFreemanZiemba2013V1Property(property_name, parent):
assembly = brainscore.get_assembly(ASSEMBLY_NAME)
similarity_metric = BootstrapDistributionSimilarity(similarity_func=ks_similarity, property_name=property_name)
ceil_func = NeuronalPropertyCeiling(similarity_metric)
return PropertiesBenchmark(identifier=f'dicarlo.Marques_freemanziemba2013-{property_name}', assembly=assembly,
neuronal_property=freemanziemba2013_properties, similarity_metric=similarity_metric,
timebins=TIMEBINS,
parent=parent, ceiling_func=ceil_func, bibtex=BIBTEX, version=1)
def MarquesFreemanZiemba2013V1TextureModulationIndex():
property_name = 'texture_modulation_index'
parent = PARENT_TEXTURE_MODULATION
return _MarquesFreemanZiemba2013V1Property(property_name=property_name, parent=parent)
def MarquesFreemanZiemba2013V1AbsoluteTextureModulationIndex():
property_name = 'absolute_texture_modulation_index'
parent = PARENT_TEXTURE_MODULATION
return _MarquesFreemanZiemba2013V1Property(property_name=property_name, parent=parent)
def MarquesFreemanZiemba2013V1TextureSelectivity():
property_name = 'texture_selectivity'
parent = PARENT_SELECTIVITY
return _MarquesFreemanZiemba2013V1Property(property_name=property_name, parent=parent)
def MarquesFreemanZiemba2013V1TextureSparseness():
property_name = 'texture_sparseness'
parent = PARENT_SELECTIVITY
return _MarquesFreemanZiemba2013V1Property(property_name=property_name, parent=parent)
def MarquesFreemanZiemba2013V1VarianceRatio():
property_name = 'variance_ratio'
parent = PARENT_SELECTIVITY
return _MarquesFreemanZiemba2013V1Property(property_name=property_name, parent=parent)
def MarquesFreemanZiemba2013V1MaxTexture():
property_name = 'max_texture'
parent = PARENT_MAGNITUDE
return _MarquesFreemanZiemba2013V1Property(property_name=property_name, parent=parent)
def MarquesFreemanZiemba2013V1MaxNoise():
property_name = 'max_noise'
parent = PARENT_MAGNITUDE
return _MarquesFreemanZiemba2013V1Property(property_name=property_name, parent=parent)
@store(identifier_ignore=['responses', 'baseline'])
def freemanziemba2013_properties(model_identifier, responses, baseline):
_assert_texture_activations(responses)
responses = responses.sortby(['type', 'family', 'sample'])
type = np.array(sorted(set(responses.type.values)))
family = np.array(sorted(set(responses.family.values)))
sample = np.array(sorted(set(responses.sample.values)))
responses = responses.values
baseline = baseline.values
assert responses.shape[0] == baseline.shape[0]
n_neuroids = responses.shape[0]
responses = responses.reshape(n_neuroids, len(type), len(family), len(sample))
responses_spikes = responses / 10
responses_spikes = np.sqrt(responses_spikes) + np.sqrt(responses_spikes + 1)
responses -= baseline.reshape((-1, 1, 1, 1))
max_texture = np.max((responses.reshape((n_neuroids, 2, -1)))[:, 1, :], axis=1, keepdims=True)
max_noise = np.max((responses.reshape((n_neuroids, 2, -1)))[:, 0, :], axis=1, keepdims=True)
max_response = np.max(responses.reshape((n_neuroids, -1)), axis=1, keepdims=True)
responses_family = responses.mean(axis=3)
texture_modulation_index = np.zeros((n_neuroids, 1))
texture_selectivity = np.zeros((n_neuroids, 1))
noise_selectivity = np.zeros((n_neuroids, 1))
    texture_sparseness = np.zeros((n_neuroids, 1))
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2020 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for Mir class.
Performs a series of tests for the Mir class, which inherits from UVData. Note that
there is a separate test module for the MirParser class (mir_parser.py), which is
what is used to read the raw binary data into something that the Mir class can
manipulate into a UVData object.
"""
import os
import pytest
import numpy as np
from ... import UVData
from ...data import DATA_PATH
from ...uvdata.mir import mir_parser
@pytest.fixture
def mir_data_object():
testfile = os.path.join(DATA_PATH, "sma_test.mir")
mir_data = mir_parser.MirParser(
testfile, load_vis=True, load_raw=True, load_auto=True,
)
yield mir_data
# cleanup
del mir_data
@pytest.fixture
def uv_in_ms(tmp_path):
uv_in = UVData()
testfile = os.path.join(DATA_PATH, "sma_test.mir")
write_file = os.path.join(tmp_path, "outtest_mir.ms")
# Currently only one source is supported.
uv_in.read(testfile)
uv_out = UVData()
yield uv_in, uv_out, write_file
# cleanup
del uv_in, uv_out
@pytest.fixture
def uv_in_uvfits(tmp_path):
uv_in = UVData()
testfile = os.path.join(DATA_PATH, "sma_test.mir/")
write_file = os.path.join(tmp_path, "outtest_mir.uvfits")
# Currently only one source is supported.
uv_in.read(testfile, pseudo_cont=True)
uv_out = UVData()
yield uv_in, uv_out, write_file
# cleanup
del uv_in, uv_out
@pytest.fixture
def uv_in_uvh5(tmp_path):
uv_in = UVData()
testfile = os.path.join(DATA_PATH, "sma_test.mir")
write_file = os.path.join(tmp_path, "outtest_mir.uvh5")
# Currently only one source is supported.
uv_in.read(testfile)
uv_out = UVData()
yield uv_in, uv_out, write_file
# cleanup
del uv_in, uv_out
@pytest.mark.filterwarnings("ignore:LST values stored in this file are not ")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_read_mir_write_uvfits(uv_in_uvfits, future_shapes):
"""
Mir to uvfits loopback test.
Read in Mir files, write out as uvfits, read back in and check for
object equality.
"""
mir_uv, uvfits_uv, testfile = uv_in_uvfits
if future_shapes:
mir_uv.use_future_array_shapes()
mir_uv.write_uvfits(testfile, spoof_nonessential=True)
uvfits_uv.read_uvfits(testfile)
if future_shapes:
uvfits_uv.use_future_array_shapes()
# UVFITS doesn't allow for numbering of spectral windows like MIR does, so
# we need an extra bit of handling here
assert len(np.unique(mir_uv.spw_array)) == len(np.unique(uvfits_uv.spw_array))
spw_dict = {idx: jdx for idx, jdx in zip(uvfits_uv.spw_array, mir_uv.spw_array)}
assert np.all(
[
idx == spw_dict[jdx]
for idx, jdx in zip(mir_uv.flex_spw_id_array, uvfits_uv.flex_spw_id_array,)
]
)
# Now that we've checked, set this things as equivalent
uvfits_uv.spw_array = mir_uv.spw_array
uvfits_uv.flex_spw_id_array = mir_uv.flex_spw_id_array
# Check the history first via find
assert 0 == uvfits_uv.history.find(
mir_uv.history + " Read/written with pyuvdata version:"
)
mir_uv.history = uvfits_uv.history
# We have to do a bit of special handling for the phase_center_catalog, because
    # _very_ small errors (like the last bit in the mantissa) creep in when passing
    # through the util function transform_sidereal_coords (for multi-phase-ctr
    # datasets). Verify that the two match up in terms of their coordinates.
for cat_name in mir_uv.phase_center_catalog.keys():
assert np.isclose(
mir_uv.phase_center_catalog[cat_name]["cat_lat"],
uvfits_uv.phase_center_catalog[cat_name]["cat_lat"],
)
assert np.isclose(
mir_uv.phase_center_catalog[cat_name]["cat_lon"],
uvfits_uv.phase_center_catalog[cat_name]["cat_lon"],
)
uvfits_uv.phase_center_catalog = mir_uv.phase_center_catalog
# There's a minor difference between what SMA calculates online for app coords
# and what pyuvdata calculates, to the tune of ~1 arcsec. Check those values here,
# then set them equal to one another.
assert np.all(
np.abs(mir_uv.phase_center_app_ra - uvfits_uv.phase_center_app_ra) < 1e-5
)
assert np.all(
np.abs(mir_uv.phase_center_app_dec - uvfits_uv.phase_center_app_dec) < 1e-5
)
mir_uv._set_app_coords_helper()
uvfits_uv._set_app_coords_helper()
# make sure filenames are what we expect
assert mir_uv.filename == ["sma_test.mir"]
assert uvfits_uv.filename == ["outtest_mir.uvfits"]
mir_uv.filename = uvfits_uv.filename
assert mir_uv == uvfits_uv
    # Since mir is multi-phase-ctr by default, this should effectively be a no-op
mir_uv._set_multi_phase_center()
assert mir_uv == uvfits_uv
@pytest.mark.filterwarnings("ignore:LST values stored in this file are not ")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_read_mir_write_ms(uv_in_ms, future_shapes):
"""
    Mir to MS loopback test.
    Read in Mir files, write out as MS, read back in and check for
object equality.
"""
pytest.importorskip("casacore")
mir_uv, ms_uv, testfile = uv_in_ms
if future_shapes:
mir_uv.use_future_array_shapes()
mir_uv.write_ms(testfile, clobber=True)
ms_uv.read(testfile)
if future_shapes:
ms_uv.use_future_array_shapes()
    # There are some minor differences between the values stored by MIR and those
    # calculated by UVData. Since MS format requires these to be calculated on the fly,
# we calculate them here just to verify that everything is looking okay.
mir_uv.set_lsts_from_time_array()
mir_uv._set_app_coords_helper()
# These reorderings just make sure that data from the two formats are lined up
# correctly.
mir_uv.reorder_freqs(spw_order="number")
ms_uv.reorder_blts()
# MS doesn't have the concept of an "instrument" name like FITS does, and instead
# defaults to the telescope name. Make sure that checks out here.
assert mir_uv.instrument == "SWARM"
assert ms_uv.instrument == "SMA"
mir_uv.instrument = ms_uv.instrument
# Quick check for history here
assert ms_uv.history != mir_uv.history
ms_uv.history = mir_uv.history
# Only MS has extra keywords, verify those look as expected.
assert ms_uv.extra_keywords == {"DATA_COL": "DATA", "observer": "SMA"}
assert mir_uv.extra_keywords == {}
mir_uv.extra_keywords = ms_uv.extra_keywords
# Make sure the filenames line up as expected.
assert mir_uv.filename == ["sma_test.mir"]
assert ms_uv.filename == ["outtest_mir.ms"]
mir_uv.filename = ms_uv.filename = None
# Finally, with all exceptions handled, check for equality.
assert ms_uv == mir_uv
@pytest.mark.filterwarnings("ignore:LST values stored ")
def test_read_mir_write_uvh5(uv_in_uvh5):
"""
    Mir to uvh5 loopback test.
    Read in Mir files, write out as uvh5, read back in and check for
object equality.
"""
mir_uv, uvh5_uv, testfile = uv_in_uvh5
mir_uv.write_uvh5(testfile)
uvh5_uv.read_uvh5(testfile)
# Check the history first via find
assert 0 == uvh5_uv.history.find(
mir_uv.history + " Read/written with pyuvdata version:"
)
# test fails because of updated history, so this is our workaround for now.
mir_uv.history = uvh5_uv.history
# make sure filenames are what we expect
assert mir_uv.filename == ["sma_test.mir"]
assert uvh5_uv.filename == ["outtest_mir.uvh5"]
mir_uv.filename = uvh5_uv.filename
assert mir_uv == uvh5_uv
def test_write_mir(uv_in_uvfits, err_type=NotImplementedError):
"""
Mir writer test
Check and make sure that attempts to use the writer return a
'not implemented' error.
"""
mir_uv, uvfits_uv, testfile = uv_in_uvfits
# Check and see if the correct error is raised
with pytest.raises(err_type):
mir_uv.write_mir("dummy.mir")
def test_multi_nchan_spw_read(tmp_path):
"""
Mir to uvfits error test for spws of different sizes.
    Read in Mir files with spectral windows of different sizes and check
    that attempting to write them to uvfits raises an error.
"""
testfile = os.path.join(DATA_PATH, "sma_test.mir")
uv_in = UVData()
uv_in.read_mir(testfile, corrchunk=[0, 1, 2, 3, 4])
dummyfile = os.path.join(tmp_path, "dummy.mirtest.uvfits")
with pytest.raises(IndexError):
uv_in.write_uvfits(dummyfile, spoof_nonessential=True)
def test_read_mir_no_records():
"""
Mir no-records check
Make sure that mir correctly handles the case where no matching records are found
"""
testfile = os.path.join(DATA_PATH, "sma_test.mir")
uv_in = UVData()
with pytest.raises(IndexError, match="No valid sources selected!"):
uv_in.read_mir(testfile, isource=-1)
with pytest.raises(IndexError, match="No valid records matching those selections!"):
uv_in.read_mir(testfile, irec=-1)
with pytest.raises(IndexError, match="No valid sidebands selected!"):
uv_in.read_mir(testfile, isb=[])
with pytest.raises(IndexError, match="isb values contain invalid entries"):
uv_in.read_mir(testfile, isb=[-156])
def test_read_mir_sideband_select():
"""
Mir sideband read check
Make sure that we can read the individual sidebands out of MIR correctly, and then
stitch them back together as though they were read together from the start.
"""
testfile = os.path.join(DATA_PATH, "sma_test.mir")
mir_dsb = UVData()
mir_dsb.read(testfile)
# Re-order here so that we can more easily compare the two
mir_dsb.reorder_freqs(channel_order="freq", spw_order="freq")
# Drop the history
mir_dsb.history = ""
mir_lsb = UVData()
mir_lsb.read(testfile, isb=[0])
mir_usb = UVData()
mir_usb.read(testfile, isb=[1])
mir_recomb = mir_lsb + mir_usb
# Re-order here so that we can more easily compare the two
mir_recomb.reorder_freqs(spw_order="freq", channel_order="freq")
# Drop the history
mir_recomb.history = ""
assert mir_dsb == mir_recomb
def test_mir_auto_read(
err_type=IndexError, err_msg="Could not determine auto-correlation record size!"
):
"""
Mir read tester
Make sure that Mir autocorrelations are read correctly
"""
testfile = os.path.join(DATA_PATH, "sma_test.mir")
mir_data = mir_parser.MirParser(testfile, has_auto=True)
with pytest.raises(err_type, match=err_msg):
ac_data = mir_data.scan_auto_data(testfile, nchunks=999)
ac_data = mir_data.scan_auto_data(testfile)
assert np.all(ac_data["nchunks"] == 8)
mir_data.load_data(load_vis=False, load_auto=True)
# Select the relevant auto records, which should be for spwin 0-3
auto_data = mir_data.read_auto_data(testfile, ac_data)[:, 0:4, :, :]
    assert np.all(
        np.logical_or(
            auto_data == mir_data.auto_data,
            np.logical_and(np.isnan(auto_data), np.isnan(mir_data.auto_data)),
        )
    )
import unittest
import numpy as np
from sklearn.neighbors import KDTree as sk_KDTree
from numba_neighbors import binary_tree as bt
from numba_neighbors import kd_tree as kd
# import os
# os.environ['NUMBA_DISABLE_JIT'] = '1'
class KDTreeTest(unittest.TestCase):
def tree(self, data, leaf_size):
return kd.KDTree(data, leaf_size=leaf_size)
@property
def num_dims(self):
return 3
# def test_construction_consistent(self):
# np.random.seed(123)
# N = 1024
# D = 3
# data = np.random.uniform(size=(N, D)).astype(np.float32)
# leaf_size = 16
# actual = kd.get_tree_data(data, leaf_size=leaf_size)
# expected = sk_KDTree(data, leaf_size=leaf_size)
# np.testing.assert_equal(actual.n_nodes, len(expected.node_data))
# np.testing.assert_equal(actual.idx_array, expected.idx_array)
# np.testing.assert_allclose(actual.node_bounds, expected.node_bounds)
# np.testing.assert_equal(actual.idx_start,
# [nd['idx_start'] for nd in expected.node_data])
# np.testing.assert_equal(actual.idx_end,
# [nd['idx_end'] for nd in expected.node_data])
# np.testing.assert_equal(actual.is_leaf,
# [nd['is_leaf'] for nd in expected.node_data])
# np.testing.assert_allclose(actual.radius,
# [nd['radius'] for nd in expected.node_data])
def test_query_consistent(self):
np.random.seed(123)
N = 1024
n = 256
D = self.num_dims
r = 0.05
r2 = r * r
max_neighbors = 32
leaf_size = 16
data = np.random.uniform(size=(N, D)).astype(np.float32)
X_indices = np.random.choice(N, size=n, replace=False)
X = data[X_indices]
sk_tree = sk_KDTree(data, leaf_size=leaf_size)
expected_indices, expected_dists = sk_tree.query_radius(
X, r, return_distance=True, sort_results=True
)
expected_counts = [d.size for d in expected_dists]
expected_dists = np.concatenate(expected_dists, axis=0)
expected_indices = np.concatenate(expected_indices, axis=0)
numba_tree = self.tree(data, leaf_size)
dists = np.full((n, max_neighbors), np.inf, dtype=np.float32)
indices = np.zeros((n, max_neighbors), dtype=np.int64)
counts = np.zeros((n,), dtype=np.int64)
numba_tree.query_radius_prealloc(X, r2, dists, indices, counts)
bt.simultaneous_sort_partial(dists, indices, counts)
mask = np.tile(
np.expand_dims(np.arange(max_neighbors), 0), (n, 1)
) < np.expand_dims(counts, axis=1)
flat_dists = dists[mask]
flat_indices = indices[mask]
        np.testing.assert_equal(np.sum(counts), np.sum(expected_counts))
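        # Note (added): the mask above is the usual "lengths -> boolean mask"
        # idiom; e.g. counts=[2, 0, 1] with max_neighbors=3 gives rows
        # [T, T, F], [F, F, F], [T, F, F], so dists[mask] keeps only the
        # valid neighbors of each query point, flattened in row order.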
from __future__ import print_function, division, absolute_import
import copy as copylib
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.testutils import reseed
import imgaug.random as iarandom
NP_VERSION = np.__version__
# A string-prefix check misses releases such as 1.26; compare the parsed
# (major, minor) pair instead.
_NP_MAJOR, _NP_MINOR = (int(part) for part in NP_VERSION.split(".")[:2])
IS_NP_117_OR_HIGHER = (_NP_MAJOR, _NP_MINOR) >= (1, 17)
class _Base(unittest.TestCase):
def setUp(self):
reseed()
class TestConstants(_Base):
def test_supports_new_np_rng_style_is_true(self):
assert iarandom.SUPPORTS_NEW_NP_RNG_STYLE is IS_NP_117_OR_HIGHER
def test_global_rng(self):
iarandom.get_global_rng() # creates global RNG upon first call
assert iarandom.GLOBAL_RNG is not None
class TestRNG(_Base):
@mock.patch("imgaug.random.normalize_generator_")
def test___init___calls_normalize_mocked(self, mock_norm):
_ = iarandom.RNG(0)
mock_norm.assert_called_once_with(0)
def test___init___with_rng(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(rng1)
assert rng2.generator is rng1.generator
@mock.patch("imgaug.random.get_generator_state")
def test_state_getter_mocked(self, mock_get):
mock_get.return_value = "mock"
rng = iarandom.RNG(0)
result = rng.state
assert result == "mock"
mock_get.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.RNG.set_state_")
def test_state_setter_mocked(self, mock_set):
rng = iarandom.RNG(0)
state = {"foo"}
rng.state = state
mock_set.assert_called_once_with(state)
@mock.patch("imgaug.random.set_generator_state_")
def test_set_state__mocked(self, mock_set):
rng = iarandom.RNG(0)
state = {"foo"}
result = rng.set_state_(state)
assert result is rng
mock_set.assert_called_once_with(rng.generator, state)
@mock.patch("imgaug.random.set_generator_state_")
def test_use_state_of__mocked(self, mock_set):
rng1 = iarandom.RNG(0)
rng2 = mock.MagicMock()
state = {"foo"}
rng2.state = state
result = rng1.use_state_of_(rng2)
assert result == rng1
mock_set.assert_called_once_with(rng1.generator, state)
@mock.patch("imgaug.random.get_global_rng")
    def test_is_global_rng__is_global__mocked(self, mock_get):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(rng1.generator)
mock_get.return_value = rng2
assert rng1.is_global_rng() is True
@mock.patch("imgaug.random.get_global_rng")
def test_is_global_rng__is_not_global__mocked(self, mock_get):
rng1 = iarandom.RNG(0)
# different instance with same state/seed should still be viewed as
# different by the method
rng2 = iarandom.RNG(0)
mock_get.return_value = rng2
assert rng1.is_global_rng() is False
@mock.patch("imgaug.random.get_global_rng")
def test_equals_global_rng__is_global__mocked(self, mock_get):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(0)
mock_get.return_value = rng2
assert rng1.equals_global_rng() is True
@mock.patch("imgaug.random.get_global_rng")
def test_equals_global_rng__is_not_global__mocked(self, mock_get):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
mock_get.return_value = rng2
assert rng1.equals_global_rng() is False
@mock.patch("imgaug.random.generate_seed_")
def test_generate_seed__mocked(self, mock_gen):
rng = iarandom.RNG(0)
mock_gen.return_value = -1
seed = rng.generate_seed_()
assert seed == -1
mock_gen.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.generate_seeds_")
def test_generate_seeds__mocked(self, mock_gen):
rng = iarandom.RNG(0)
mock_gen.return_value = [-1, -2]
seeds = rng.generate_seeds_(2)
assert seeds == [-1, -2]
mock_gen.assert_called_once_with(rng.generator, 2)
@mock.patch("imgaug.random.reset_generator_cache_")
def test_reset_cache__mocked(self, mock_reset):
rng = iarandom.RNG(0)
result = rng.reset_cache_()
assert result is rng
mock_reset.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.derive_generators_")
def test_derive_rng__mocked(self, mock_derive):
gen = iarandom.convert_seed_to_generator(0)
mock_derive.return_value = [gen]
rng = iarandom.RNG(0)
result = rng.derive_rng_()
assert result.generator is gen
mock_derive.assert_called_once_with(rng.generator, 1)
@mock.patch("imgaug.random.derive_generators_")
def test_derive_rngs__mocked(self, mock_derive):
gen1 = iarandom.convert_seed_to_generator(0)
gen2 = iarandom.convert_seed_to_generator(1)
mock_derive.return_value = [gen1, gen2]
rng = iarandom.RNG(0)
result = rng.derive_rngs_(2)
assert result[0].generator is gen1
assert result[1].generator is gen2
mock_derive.assert_called_once_with(rng.generator, 2)
@mock.patch("imgaug.random.is_generator_equal_to")
def test_equals_mocked(self, mock_equal):
mock_equal.return_value = "foo"
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
result = rng1.equals(rng2)
assert result == "foo"
mock_equal.assert_called_once_with(rng1.generator, rng2.generator)
def test_equals_identical_generators(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(rng1)
assert rng1.equals(rng2)
def test_equals_with_similar_generators(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(0)
assert rng1.equals(rng2)
def test_equals_with_different_generators(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
assert not rng1.equals(rng2)
def test_equals_with_advanced_generator(self):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(0)
rng2.advance_()
assert not rng1.equals(rng2)
@mock.patch("imgaug.random.advance_generator_")
def test_advance__mocked(self, mock_advance):
rng = iarandom.RNG(0)
result = rng.advance_()
assert result is rng
mock_advance.assert_called_once_with(rng.generator)
@mock.patch("imgaug.random.copy_generator")
def test_copy_mocked(self, mock_copy):
rng1 = iarandom.RNG(0)
rng2 = iarandom.RNG(1)
mock_copy.return_value = rng2.generator
result = rng1.copy()
assert result.generator is rng2.generator
mock_copy.assert_called_once_with(rng1.generator)
@mock.patch("imgaug.random.RNG.copy")
@mock.patch("imgaug.random.RNG.is_global_rng")
def test_copy_unless_global_rng__is_global__mocked(self, mock_is_global,
mock_copy):
rng = iarandom.RNG(0)
mock_is_global.return_value = True
mock_copy.return_value = "foo"
result = rng.copy_unless_global_rng()
assert result is rng
mock_is_global.assert_called_once_with()
assert mock_copy.call_count == 0
@mock.patch("imgaug.random.RNG.copy")
@mock.patch("imgaug.random.RNG.is_global_rng")
def test_copy_unless_global_rng__is_not_global__mocked(self, mock_is_global,
mock_copy):
rng = iarandom.RNG(0)
mock_is_global.return_value = False
mock_copy.return_value = "foo"
result = rng.copy_unless_global_rng()
        assert result == "foo"  # comparing to a str literal with "is" is unreliable
mock_is_global.assert_called_once_with()
mock_copy.assert_called_once_with()
def test_duplicate(self):
rng = iarandom.RNG(0)
rngs = rng.duplicate(1)
assert rngs == [rng]
def test_duplicate_two_entries(self):
rng = iarandom.RNG(0)
rngs = rng.duplicate(2)
assert rngs == [rng, rng]
@mock.patch("imgaug.random.create_fully_random_generator")
def test_create_fully_random_mocked(self, mock_create):
gen = iarandom.convert_seed_to_generator(0)
mock_create.return_value = gen
rng = iarandom.RNG.create_fully_random()
mock_create.assert_called_once_with()
assert rng.generator is gen
@mock.patch("imgaug.random.derive_generators_")
def test_create_pseudo_random__mocked(self, mock_get):
rng_glob = iarandom.get_global_rng()
rng = iarandom.RNG(0)
mock_get.return_value = [rng.generator]
result = iarandom.RNG.create_pseudo_random_()
assert result.generator is rng.generator
mock_get.assert_called_once_with(rng_glob.generator, 1)
@mock.patch("imgaug.random.polyfill_integers")
def test_integers_mocked(self, mock_func):
mock_func.return_value = "foo"
rng = iarandom.RNG(0)
result = rng.integers(low=0, high=1, size=(1,), dtype="int64",
endpoint=True)
assert result == "foo"
mock_func.assert_called_once_with(
rng.generator, low=0, high=1, size=(1,), dtype="int64",
endpoint=True)
@mock.patch("imgaug.random.polyfill_random")
def test_random_mocked(self, mock_func):
mock_func.return_value = "foo"
rng = iarandom.RNG(0)
        out = np.zeros((1,), dtype="float64")
"""
pyart.correct.filters
=====================
Functions for creating gate filters (masks) which can be used it various
corrections routines in Py-ART.
.. autosummary::
:toctree: generated/
moment_based_gate_filter
.. autosummary::
:toctree: generated/
:template: dev_template.rst
GateFilter
"""
import numpy as np
from ..config import get_field_name
def moment_based_gate_filter(
radar, ncp_field=None, rhv_field=None, refl_field=None,
min_ncp=0.5, min_rhv=None, min_refl=-20., max_refl=100.0):
"""
Create a filter which removes undesired gates based on moments.
Creates a gate filter in which the following gates are excluded:
* Gates where the reflectivity is outside the interval min_refl, max_refl.
* Gates where the normalized coherent power is below min_ncp.
* Gates where the cross correlation ratio is below min_rhi. Using the
default parameter this filtering is disabled.
* Gates where any of the above three fields are masked or contain
invalid values (NaNs or infs).
    * If any of these three fields does not exist in the radar, that field's
      filter criteria are not applied.
Parameters
----------
radar : Radar
Radar object from which the gate filter will be built.
refl_field, ncp_field, rhv_field : str
Names of the radar fields which contain the reflectivity, normalized
coherent power (signal quality index) and cross correlation ratio
(RhoHV) from which the gate filter will be created using the above
criteria. A value of None for any of these parameters will use the
default field name as defined in the Py-ART configuration file.
min_ncp, min_rhv : float
Minimum values for the normalized coherence power and cross
correlation ratio. Gates in these fields below these limits as well as
gates which are masked or contain invalid values will be excluded and
not used in calculation which use the filter. A value of None will
disable filtering based upon the given field including removing
masked or gates with an invalid value. To disable the thresholding
but retain the masked and invalid filter set the parameter to a value
below the lowest value in the field.
min_refl, max_refl : float
Minimum and maximum values for the reflectivity. Gates outside
of this interval as well as gates which are masked or contain invalid
values will be excluded and not used in calculation which use this
filter. A value or None for one of these parameters will disable the
minimum or maximum filtering but retain the other. A value of None
for both of these values will disable all filtering based upon the
reflectivity including removing masked or gates with an invalid value.
To disable the interval filtering but retain the masked and invalid
filter set the parameters to values above and below the lowest and
greatest values in the reflectivity field.
Returns
-------
gatefilter : :py:class:`GateFilter`
A gate filter based upon the described criteria. This can be
used as a gatefilter parameter to various functions in pyart.correct.
"""
# parse the field parameters
if refl_field is None:
refl_field = get_field_name('reflectivity')
if ncp_field is None:
ncp_field = get_field_name('normalized_coherent_power')
if rhv_field is None:
rhv_field = get_field_name('cross_correlation_ratio')
# filter gates based upon field parameters
gatefilter = GateFilter(radar)
if (min_ncp is not None) and (ncp_field in radar.fields):
gatefilter.exclude_below(ncp_field, min_ncp)
gatefilter.exclude_masked(ncp_field)
gatefilter.exclude_invalid(ncp_field)
if (min_rhv is not None) and (rhv_field in radar.fields):
gatefilter.exclude_below(rhv_field, min_rhv)
gatefilter.exclude_masked(rhv_field)
gatefilter.exclude_invalid(rhv_field)
if refl_field in radar.fields:
if min_refl is not None:
gatefilter.exclude_below(refl_field, min_refl)
gatefilter.exclude_masked(refl_field)
gatefilter.exclude_invalid(refl_field)
if max_refl is not None:
gatefilter.exclude_above(refl_field, max_refl)
gatefilter.exclude_masked(refl_field)
gatefilter.exclude_invalid(refl_field)
return gatefilter
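# Illustrative usage (added; the file name and thresholds are hypothetical,
# not recommendations):
#   radar = pyart.io.read('radar_file.nc')
#   gatefilter = moment_based_gate_filter(radar, min_ncp=0.4, min_rhv=0.8)
#   gatefilter.gate_excluded  # boolean (nrays, ngates) array of bad gates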
class GateFilter(object):
"""
A class for building a boolean arrays for filtering gates based on
a set of condition typically based on the values in the radar fields.
These filter can be used in various algorithms and calculations within
Py-ART.
See :py:func:`pyart.correct.GateFilter.exclude_below` for method
parameter details.
Parameters
----------
radar : Radar
Radar object from which gate filter will be build.
exclude_based : bool, optional
True, the default and suggested method, will begin with all gates
included and then use the exclude methods to exclude gates based on
conditions. False will begin with all gates excluded from which
a set of gates to include should be set using the include methods.
Attributes
----------
gate_excluded : array, dtype=bool
Boolean array indicating if a gate should be excluded from a
calculation. Elements marked True indicate the corresponding gate
should be excluded. Those marked False should be included.
        This is a read-only attribute; any changes to the array will NOT
be reflected in gate_included and will be lost when the attribute is
accessed again.
gate_included : array, dtype=bool
Boolean array indicating if a gate should be included in a
calculation. Elements marked True indicate the corresponding gate
        should be included. Those marked False should be excluded.
        This is a read-only attribute; any changes to the array will NOT
be reflected in gate_excluded and will be lost when the attribute is
accessed again.
Examples
--------
>>> import pyart
>>> radar = pyart.io.read('radar_file.nc')
>>> gatefilter = pyart.correct.GateFilter(radar)
>>> gatefilter.exclude_below('reflectivity', 10)
>>> gatefilter.exclude_below('normalized_coherent_power', 0.75)
"""
def __init__(self, radar, exclude_based=True):
""" initialize """
self._radar = radar
shape = (radar.nrays, radar.ngates)
if exclude_based:
# start with all gates included, exclude gates based on a set
# of rules using the exclude_ methods.
            self._gate_excluded = np.zeros(shape, dtype=bool)  # np.bool was removed in NumPy 1.24
else:
# start with all gates excluded, include gates based on a set
# of rules using the include_ methods.
            self._gate_excluded = np.ones(shape, dtype=bool)
    # Implementation is based on marking excluded gates stored in the private
    # _gate_excluded attribute. The gate_included attribute can be found
    # by taking the ones complement of _gate_excluded.
def copy(self):
""" Return a copy of the gatefilter. """
a = GateFilter(self._radar)
a._gate_excluded = self._gate_excluded.copy()
return a
@property
def gate_included(self):
return ~self._gate_excluded.copy()
@property
def gate_excluded(self):
return self._gate_excluded.copy()
def _get_fdata(self, field):
""" Check that the field exists and retrieve field data. """
self._radar.check_field_exists(field)
return self._radar.fields[field]['data']
def _merge(self, marked, op, exclude_masked):
""" Merge an array of marked gates with the exclude array. """
# exclude masked elements in marked by replacing them with the value
# of the exclude_masked flag. This does nothing if marked is a
# non-masked array.
if exclude_masked not in [True, False]:
raise ValueError("exclude_masked must be 'True' or 'False'")
marked = np.ma.filled(marked, exclude_masked)
# merge array of marked gates with existing excluded gates
# using the specified operation.
if op == 'or':
self._gate_excluded = np.logical_or(self._gate_excluded, marked)
elif op == 'and':
self._gate_excluded = np.logical_and(self._gate_excluded, marked)
elif op == 'new':
self._gate_excluded = marked
else:
raise ValueError("invalid 'op' parameter: ", op)
return
###################
# exclude methods #
###################
def exclude_below(self, field, value, exclude_masked=True, op='or',
inclusive=False):
"""
Exclude gates where a given field is below a given value.
Parameters
----------
field : str
Name of field compared against the value.
value : float
Gates with a value below this value in the specified field will
be marked for exclusion in the filter.
exclude_masked : bool, optional
True to filter masked values in the specified field if the data is
a masked array, False to include any masked values.
op : {'and', 'or', 'new'}
Operation to perform when merging the existing set of excluded
gates with the excluded gates from the current operation.
'and' will perform a logical AND operation, 'or' a logical OR,
and 'new' will replace the existing excluded gates with the one
generated here. 'or', the default for exclude methods, is
typically desired when building up a set of conditions for
excluding gates where the desired effect is to exclude gates which
meet any of the conditions. 'and', the default for include
methods, is typically desired when building up a set of conditions
where the desired effect is to include gates which meet any of the
        conditions. Note that the 'and' method MAY result in including
gates which have previously been excluded because they were masked
or invalid.
inclusive : bool
Indicates whether the specified value should also be excluded.
"""
if inclusive:
marked = self._get_fdata(field) <= value
else:
marked = self._get_fdata(field) < value
return self._merge(marked, op, exclude_masked)
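    # Worked example (added): with reflectivity values [5, 15, 25] and
    # value=10, exclude_below marks [True, False, False]. A subsequent
    # exclude_below('cross_correlation_ratio', 0.8) with the default op='or'
    # unions the two masks, so a gate is excluded if it fails either test.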
def exclude_above(self, field, value, exclude_masked=True, op='or',
inclusive=False):
""" Exclude gates where a given field is above a given value. """
if inclusive:
marked = self._get_fdata(field) >= value
else:
marked = self._get_fdata(field) > value
return self._merge(marked, op, exclude_masked)
def exclude_inside(self, field, v1, v2, exclude_masked=True, op='or',
inclusive=True):
""" Exclude gates where a given field is inside a given interval. """
if v2 < v1:
(v1, v2) = (v2, v1)
fdata = self._get_fdata(field)
if inclusive:
marked = (fdata >= v1) & (fdata <= v2)
else:
marked = (fdata > v1) & (fdata < v2)
return self._merge(marked, op, exclude_masked)
def exclude_outside(self, field, v1, v2, exclude_masked=True, op='or',
inclusive=False):
""" Exclude gates where a given field is outside a given interval. """
if v2 < v1:
(v1, v2) = (v2, v1)
fdata = self._get_fdata(field)
if inclusive:
marked = (fdata <= v1) | (fdata >= v2)
else:
marked = (fdata < v1) | (fdata > v2)
return self._merge(marked, op, exclude_masked)
def exclude_equal(self, field, value, exclude_masked=True, op='or'):
""" Exclude gates where a given field is equal to a value. """
marked = (self._get_fdata(field) == value)
return self._merge(marked, op, exclude_masked)
def exclude_not_equal(self, field, value, exclude_masked=True, op='or'):
""" Exclude gates where a given field is not equal to a value. """
marked = (self._get_fdata(field) != value)
return self._merge(marked, op, exclude_masked)
def exclude_all(self):
""" Exclude all gates. """
self._gate_excluded = np.ones_like(self._gate_excluded)
return
def exclude_none(self):
""" Exclude no gates, include all gates. """
self._gate_excluded = np.zeros_like(self._gate_excluded)
return
def exclude_masked(self, field, exclude_masked=True, op='or'):
""" Exclude gates where a given field is masked. """
marked = np.ma.getmaskarray(self._get_fdata(field))
return self._merge(marked, op, exclude_masked)
def exclude_invalid(self, field, exclude_masked=True, op='or'):
"""
Exclude gates where an invalid value occurs in a field (NaNs or infs).
"""
marked = ~np.isfinite(self._get_fdata(field))
return self._merge(marked, op, exclude_masked)
def exclude_gates(self, mask, exclude_masked=True, op='or'):
"""
Exclude gates where a given mask is equal True.
Parameters
----------
mask : numpy array
Boolean numpy array with same shape as a field array.
exclude_masked : bool, optional
True to filter masked values in the specified mask if it is
a masked array, False to include any masked values.
op : {'and', 'or', 'new'}
Operation to perform when merging the existing set of excluded
gates with the excluded gates from the current operation.
'and' will perform a logical AND operation, 'or' a logical OR,
and 'new' will replace the existing excluded gates with the one
generated here. 'or', the default for exclude methods, is
typically desired when building up a set of conditions for
excluding gates where the desired effect is to exclude gates which
meet any of the conditions. 'and', the default for include
methods, is typically desired when building up a set of conditions
where the desired effect is to include gates which meet any of the
        conditions. Note that the 'and' method MAY result in including
gates which have previously been excluded because they were masked
or invalid.
"""
fdata = next(iter(self._radar.fields.values()))['data']
if mask.shape != fdata.shape:
raise ValueError("mask array must be the same size as a field.")
marked = np.array(mask, dtype='bool')
return self._merge(marked, op, exclude_masked)
####################
# include_ methods #
####################
def include_below(self, field, value, exclude_masked=True, op='and',
inclusive=False):
""" Include gates where a given field is below a given value. """
if inclusive:
marked = self._get_fdata(field) <= value
else:
marked = self._get_fdata(field) < value
self._merge(~marked, op, exclude_masked)
def include_above(self, field, value, exclude_masked=True, op='and',
inclusive=False):
""" Include gates where a given field is above a given value. """
if inclusive:
marked = self._get_fdata(field) >= value
else:
marked = self._get_fdata(field) > value
self._merge(~marked, op, exclude_masked)
def include_inside(self, field, v1, v2, exclude_masked=True, op='and',
inclusive=True):
""" Include gates where a given field is inside a given interval. """
if v2 < v1:
(v1, v2) = (v2, v1)
fdata = self._get_fdata(field)
if inclusive:
marked = (fdata >= v1) & (fdata <= v2)
else:
marked = (fdata > v1) & (fdata < v2)
return self._merge(~marked, op, exclude_masked)
def include_outside(self, field, v1, v2, exclude_masked=True, op='and',
inclusive=False):
""" Include gates where a given field is outside a given interval. """
if v2 < v1:
(v1, v2) = (v2, v1)
fdata = self._get_fdata(field)
if inclusive:
marked = (fdata <= v1) | (fdata >= v2)
else:
marked = (fdata < v1) | (fdata > v2)
return self._merge(~marked, op, exclude_masked)
def include_equal(self, field, value, exclude_masked=True, op='and'):
""" Include gates where a given field is equal to a value. """
marked = (self._get_fdata(field) == value)
return self._merge(~marked, op, exclude_masked)
def include_not_equal(self, field, value, exclude_masked=True, op='and'):
""" Include gates where a given field is not equal to a value. """
marked = (self._get_fdata(field) != value)
return self._merge(~marked, op, exclude_masked)
def include_all(self):
""" Include all gates. """
self._gate_excluded = np.zeros_like(self._gate_excluded)
def include_none(self):
""" Include no gates, exclude all gates. """
self._gate_excluded = np.ones_like(self._gate_excluded)
def include_not_masked(self, field, exclude_masked=True, op='and'):
""" Include gates where a given field in not masked. """
marked = np.ma.getmaskarray(self._get_fdata(field))
return self._merge(marked, op, exclude_masked)
def include_valid(self, field, exclude_masked=True, op='and'):
"""
Include gates where a valid value occurs in a field (not NaN or inf).
"""
marked = np.isfinite(self._get_fdata(field))
return self._merge(~marked, op, exclude_masked)
def include_gates(self, mask, exclude_masked=True, op='and'):
"""
Include gates where a given mask is equal True.
Parameters
----------
mask : numpy array
Boolean numpy array with same shape as a field array.
exclude_masked : bool, optional
True to filter masked values in the specified mask if it is
a masked array, False to include any masked values.
op : {'and', 'or', 'new'}
Operation to perform when merging the existing set of excluded
gates with the excluded gates from the current operation.
'and' will perform a logical AND operation, 'or' a logical OR,
and 'new' will replace the existing excluded gates with the one
generated here. 'or', the default for exclude methods, is
typically desired when building up a set of conditions for
excluding gates where the desired effect is to exclude gates which
meet any of the conditions. 'and', the default for include
methods, is typically desired when building up a set of conditions
where the desired effect is to include gates which meet any of the
        conditions. Note that the 'or' method MAY result in excluding
gates which have previously been included.
"""
fdata = next(iter(self._radar.fields.values()))['data']
if mask.shape != fdata.shape:
raise ValueError("Mask array must be the same size as a field.")
        marked = ~np.array(mask, dtype='bool')
        return self._merge(marked, op, exclude_masked)
'''
Name: load_ops.py
Desc: Input pipeline using feed dict method to provide input data to model.
Some of this code is taken from Richard Zhang's colorization GitHub
repository and the Python Caffe library.
Other parts of this code have been taken from <NAME>'s library.
'''
from __future__ import absolute_import, division, print_function
import itertools
import json
import math
import numpy as np
from numpy import linalg as LA
import os
from PIL import Image
import PIL
import pdb
import pickle
import random
import scipy
from scipy.ndimage.interpolation import zoom
from scipy.ndimage.filters import gaussian_filter
import skimage
import skimage.io
from skimage.transform import resize
import sklearn.neighbors as nn
import string
import subprocess
import sys
# import tensorflow as tf
from transforms3d import euler
import transforms3d
import traceback as tb
# if tf.__version__ == '0.10.0':
# tf_summary_scalar = tf.scalar_summary
# else:
# tf_summary_scalar = tf.summary.scalar
#######################
# Loading fns
#######################
def load_scaled_image( filename, color=True ):
"""
Load an image converting from grayscale or alpha as needed.
From KChen
Args:
filename : string
color : boolean
flag for color format. True (default) loads as RGB while False
loads as intensity (if image is already grayscale).
Returns
image : an image with type np.float32 in range [0, 1]
of size (H x W x 3) in RGB or
of size (H x W x 1) in grayscale.
By kchen
"""
img = skimage.img_as_float(skimage.io.imread(filename, as_gray=not color)).astype(np.float32)
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def load_raw_image( filename, color=True, use_pil=False ):
"""
Load an image converting from grayscale or alpha as needed.
Adapted from KChen
Args:
filename : string
color : boolean
flag for color format. True (default) loads as RGB while False
loads as intensity (if image is already grayscale).
Returns
image : an image with image original dtype and image pixel range
of size (H x W x 3) in RGB or
of size (H x W x 1) in grayscale.
"""
if use_pil:
img = Image.open( filename )
else:
img = skimage.io.imread(filename, as_gray=not color)
if use_pil:
return img
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
#########################
# Image manipulation fns
#########################
def resize_rescale_imagenet(img, new_dims, interp_order=1, current_scale=None, no_clip=False):
"""
    Resize an image array with interpolation, convert RGB to BGR scaled to
    [0, 255], and subtract the ImageNet channel means.
    Parameters
    ----------
    im : (H x W x K) ndarray
    new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = img[:,:,[2,1,0]] * 255.
mean_bgr = [103.062623801, 115.902882574, 123.151630838]
img = img - mean_bgr
return img
def resize_rescale_image_low_sat(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):
"""
    Resize an image array with interpolation, clip values to [0.1, 0.9],
    and rescale them to lie within new_scale.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = np.clip(img, 0.1, 0.9)
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip )
return img
def resize_rescale_image_low_sat_2(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):
"""
    Resize an image array with interpolation, clip values to [0.2, 0.8],
    and rescale them to lie within new_scale.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = np.clip(img, 0.2, 0.8)
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip )
return img
def resize_rescale_image(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):
"""
    Resize an image array with interpolation, and rescale values to lie
    within new_scale.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img ) # between [0,255] (512,512,3)
img = resize_image( img, new_dims, interp_order ) # between [0,1] (512,512,3)
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip ) # between [-1,1] (256,256,3)
return img
def resize_rescale_image_gaussian_blur(img, new_dims, new_scale, interp_order=1, blur_strength=4, current_scale=None, no_clip=False):
"""
    Resize an image array with interpolation, rescale values to lie within
    new_scale, and apply a Gaussian blur.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=True )
blurred = gaussian_filter(img, sigma=blur_strength)
if not no_clip:
min_val, max_val = new_scale
np.clip(blurred, min_val, max_val, out=blurred)
return blurred
def resize_image(im, new_dims, interp_order=1):
"""
Resize an image array with interpolation.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
By kchen @ https://github.com/kchen92/joint-representation/blob/24b30ca6963d2ec99618af379c1e05e1f7026710/lib/data/input_pipeline_feed_dict.py
"""
if type(im) == PIL.PngImagePlugin.PngImageFile:
interps = [PIL.Image.NEAREST, PIL.Image.BILINEAR]
return skimage.util.img_as_float(im.resize(new_dims, interps[interp_order]))
if all( new_dims[i] == im.shape[i] for i in range( len( new_dims ) ) ):
resized_im = im #return im.astype(np.float32)
elif im.shape[-1] == 1 or im.shape[-1] == 3:
resized_im = resize(im, new_dims, order=interp_order, preserve_range=True)
else:
# ndimage interpolates anything but more slowly.
scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
resized_im = zoom(im, scale + (1,), order=interp_order)
# resized_im = resized_im.astype(np.float32)
return resized_im
def rescale_image(im, new_scale=[-1.,1.], current_scale=None, no_clip=False):
"""
Rescales an image pixel values to target_scale
Args:
img: A np.float_32 array, assumed between [0,1]
new_scale: [min,max]
current_scale: If not supplied, it is assumed to be in:
[0, 1]: if dtype=float
[0, 2^16]: if dtype=uint
[0, 255]: if dtype=ubyte
Returns:
rescaled_image
"""
im = skimage.img_as_float(im).astype(np.float32)
if current_scale is not None:
min_val, max_val = current_scale
if not no_clip:
im = np.clip(im, min_val, max_val)
im = im - min_val
im /= (max_val - min_val)
min_val, max_val = new_scale
im *= (max_val - min_val)
im += min_val
return im
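def _rescale_image_demo():
    # Added sketch (not part of the original pipeline): sanity-check the
    # linear mapping, assuming float input already in the default [0, 1].
    x = np.array([0.0, 0.5, 1.0], dtype=np.float32)
    assert np.allclose(rescale_image(x, new_scale=[-1., 1.]), [-1., 0., 1.])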
def resize_and_rescale_image_log( img, new_dims, offset=1., normalizer=1.):
"""
Resizes and rescales an img to log-linear
Args:
img: A np array
offset: Shifts values by offset before taking log. Prevents
taking the log of a negative number
normalizer: divide by the normalizing factor after taking log
Returns:
rescaled_image
"""
img = np.log( float( offset ) + img ) / normalizer
img = resize_image(img, new_dims)
return img
def rescale_image_log( img, offset=1., normalizer=1. ):
"""
Rescales an img to log-linear
Args:
img: A np array
offset: Shifts values by offset before taking log. Prevents
taking the log of a negative number
normalizer: divide by the normalizing factor after taking log
Returns:
rescaled_image
"""
return np.log( float( offset ) + img ) / normalizer
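# Example (added): with offset=1 and normalizer=np.log(2 ** 16), a uint16
# depth map compresses to roughly [0, 1]: log(1 + 65535) / log(65536) == 1.0.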
################
# Curvature #
#################
def curvature_preprocess(img, new_dims, interp_order=1):
img = resize_image(img, new_dims, interp_order)
img = img[:,:,:2]
img = img - [123.572, 120.1]
img = img / [31.922, 21.658]
return img
def curvature_preprocess_gaussian_with_blur(img, new_dims, interp_order=1, blur_strength=4):
k1 = img[:,:,0].astype(np.float32) - 128.0
k2 = img[:,:,1].astype(np.float32) - 128.0
curv = k1 * k2
curv = curv * 8.0 / (127.0 ** 2)
curv = curv[:,:,np.newaxis]
curv = resize_image(curv, new_dims, interp_order)
blurred = gaussian_filter(curv, sigma=blur_strength)
return blurred
def curvature_preprocess_gaussian(img, new_dims, interp_order=1):
k1 = img[:,:,0].astype(np.float32) - 128.0
k2 = img[:,:,1].astype(np.float32) - 128.0
curv = k1 * k2
curv = curv * 8.0 / (127.0 ** 2)
curv = curv[:,:,np.newaxis]
curv = resize_image(curv, new_dims, interp_order)
return curv
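# Note (added): k1 and k2 land in roughly [-128, 127], so the scaled product
# curv = k1 * k2 * 8 / 127 ** 2 lies in about [-8.1, 8.1] before resizing.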
#################
# Denoising #
#################
def random_noise_image(img, new_dims, new_scale, interp_order=1 ):
"""
Add noise to an image
Args:
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns:
a noisy version of the original clean image
"""
img = skimage.util.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = skimage.util.random_noise(img, var=0.01)
img = rescale_image( img, new_scale )
return img
#################
# Colorization #
#################
def to_light_low_sat(img, new_dims, new_scale, interp_order=1 ):
"""
Turn an image into lightness
Args:
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns:
a lightness version of the original image
"""
img = skimage.img_as_float( img )
img = np.clip(img, 0.2, 0.8)
img = resize_image( img, new_dims, interp_order )
img = skimage.color.rgb2lab(img)[:,:,0]
img = rescale_image( img, new_scale, current_scale=[0,100])
return np.expand_dims(img,2)
def to_light(img, new_dims, new_scale, interp_order=1 ):
"""
Turn an image into lightness
Args:
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns:
a lightness version of the original image
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = skimage.color.rgb2lab(img)[:,:,0]
img = rescale_image( img, new_scale, current_scale=[0,100])
return np.expand_dims(img,2)
def to_ab(img, new_dims, new_scale, interp_order=1 ):
"""
Turn an image into ab
Args:
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns:
an ab (color opponent channels) version of the original image
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = skimage.color.rgb2lab(img)[:,:,1:]
img = rescale_image( img, new_scale, current_scale=[-100,100])
return img
def ab_image_to_prob(img, new_dims, root, interp_order=1):
"""
Turn an image into a probability distribution across the color pairs specified in pts_in_hull.npy.
Reference: https://github.com/richzhang/colorization
Args:
im : (H x W x K) ndarray
Returns:
Color label ground truth across 313 possible ab color combinations
"""
img = resize_image( img, new_dims, interp_order ).astype('uint8')
img = skimage.color.rgb2lab(img)[:,:,1:]
curr_dir = os.path.dirname(os.path.realpath(__file__))
cc = np.load(os.path.join(curr_dir, 'pts_in_hull.npy'))
K = cc.shape[0]
NN = 10
sigma = 5.
nbrs = nn.NearestNeighbors(n_neighbors=NN, algorithm='ball_tree').fit(cc)
num_pixels = img.shape[0] * img.shape[1]
img_flattened = img.reshape(num_pixels, img.shape[2])
encoded_flattened = np.zeros((num_pixels, K))
point_index = np.arange(0,num_pixels, dtype='int')[:, np.newaxis]
(dists, inds) = nbrs.kneighbors(img_flattened)
wts = np.exp(-dists**2/(2*sigma**2))
wts = wts/np.sum(wts,axis=1)[:,np.newaxis]
encoded_flattened[point_index, inds] = wts
encoded = encoded_flattened.reshape([img.shape[0], img.shape[1], K])
############## Prior Boost Mask #################
prior_factor = np.load(os.path.join(curr_dir, 'prior_factor_in_door.npy'))
encoded_maxid = np.argmax(encoded, axis=-1)
mask = prior_factor[encoded_maxid]
return encoded, mask
###################
# Context Encoder #
###################
def context_encoder_input( img, new_dims, new_scale, interp_order=1 ):
'''
Context encoder input function: replace the middle section with a constant
Returns:
----------
img: image with the center 1/4 region set to a constant (zero) value
'''
img = resize_rescale_image(img, new_dims, new_scale, interp_order=interp_order)
H,W,K = img.shape
img[ int(H/4):int(3*H/4), int(W/4):int(3*W/4), :] = 0
return img
def context_encoder_output(img, new_dims, new_scale, interp_order=1 ):
'''
Context encoder target function, take out the middle chunk
'''
whole_dims = (new_dims[0]*2, new_dims[1]*2)
img = resize_rescale_image(img, whole_dims, new_scale, interp_order=interp_order)
H,W,_ = img.shape
center_piece = img[ int(H/4):int(H/4)+new_dims[0]
, int(W/4):int(W/4)+new_dims[1], :]
return center_piece
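# Note: the target is rendered at double resolution (2x new_dims) and the
# returned center piece has exactly new_dims, matching the hole zeroed out
# by context_encoder_input.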
#################################
# Discriminative Target Process #
#################################
def parse_filename( filename ):
"""
Filename is in the format:
'/{PATH_TO_DATA_ROOT}/{MODEL_ID}/{domain}
/point_{POINT_ID}_view_{VIEW_ID}_domain_{DOMAIN_NAME}.png'
Parameters:
-----------
filename: a string in the format specified above.
Returns:
-----------
path_to_root: path to data root directory
domain: domain name
model_id: model id
point_id: point id
view_id: view id
"""
if "\\" in filename:  # Windows-style path; otherwise fall through to the POSIX branch below
components = filename.split("\\")
domain = components[-2]
name_components = components[-1].split('_')
root_length = len(components) - 3
if len(name_components) == 6:
point_id = name_components[1]
view_id = name_components[3]
elif len(name_components) == 1:
view_id = name_components[0]
point_id = components[root_length + 1]
root = components[0].split("/")
model_id = root[-1]
path_to_root = "/".join(root[0:-1])
return path_to_root, domain, model_id, point_id, view_id
preappend_slash = (filename[0] == '/')
components = filename.split('/')[preappend_slash:]
root_length = len(components) - 3
if preappend_slash:
path_to_root = os.path.join("/" , *components[:root_length])
else:
path_to_root = os.path.join(*components[:root_length])
model_id = components[root_length]
name_components = components[-1].split('_')
if len(name_components) == 6:
domain = components[root_length+1]
point_id = name_components[1]
view_id = name_components[3]
elif len(name_components) == 1:
view_id = name_components[0]
point_id = components[root_length+1]
domain = 'rgb'
return path_to_root, domain, model_id, point_id, view_id
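# Example (hypothetical path, for illustration only):
# parse_filename('/data/taskonomy/model_1/rgb/point_12_view_3_domain_rgb.png')
# -> ('/data/taskonomy', 'rgb', 'model_1', '12', '3')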
def generate_rgb_image_filename_from_ID(root, model_id, point_id, view_id):
'''
Given the root, model_id, point_id, view_id of an image, return the rgb
file path of that image. The file path is in the format:
/{root}/{model_id}/rgb/
point_{point_id}_view_{view_id}_domain_rgb.png
Parameters:
-----------
root: path to root
model_id: id of the model
point_id: the id number of the point
view_id: the id number of views
Returns:
-----------
path: file path to the image file
'''
filename = "point_{point_id}_view_{view_id}_domain_rgb.png".format(
point_id=point_id, view_id=view_id)
path = os.path.join(root, model_id, 'rgb', filename)
return path
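# Example (hypothetical root, for illustration only):
# generate_rgb_image_filename_from_ID('/data', 'model_1', '12', '3')
# -> '/data/model_1/rgb/point_12_view_3_domain_rgb.png'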
def make_image_filenames( filename, num_input):
'''
Turn one image filename that contains the information of an image pair into multiple
image filenames.
For camera pose matching.
The filename should be in the same format, except that the point_id and view_id
fields are lists of num_input integers separated by commas:
/{PATH_TO_ROOT}/{MODEL_ID}/{domain}/{LIST_OF_POINT_IDS}_
view_{LIST_OF_VIEW_IDS}_{SOMETHING ELSE}
Parameters:
-----------
filename: A filename that in the format specified as above.
num_input: length of the LIST_OF_POINT_IDS
Returns:
-----------
filenames: A list of image filenames
'''
if len(filename.split('/')) in (6, 8):
return [filename] * num_input
root, domain, model_id, point_ids, view_ids = parse_filename( filename )
model_ids = model_id.split(',')
point_ids = point_ids.split(',')
view_ids = view_ids.split(',')
if len(view_ids) != num_input:
if len(view_ids) == 1 and len(point_ids) == 1:
image_name = generate_rgb_image_filename_from_ID(root, model_id, point_ids[0], view_ids[0])
image_name = [image_name] * num_input
return image_name
else:
raise ValueError("num_input doesn't match the length of view_ids")
filenames = []
if len(point_ids) == 1:
point_id = point_ids[0]
for index in range(num_input):
view_id = view_ids[index]
filenames.append(generate_rgb_image_filename_from_ID(root, model_id, point_id, view_id))
else:
for index in range(num_input):
view_id = view_ids[index]
point_id = point_ids[index]
if len(model_ids) > 1:
model_i = model_ids[index]
else:
model_i = model_id
filenames.append(generate_rgb_image_filename_from_ID(root, model_i, point_id, view_id))
return filenames
###################
# Point Matching #
###################
def point_match_new( filename ):
model_ids = filename.split('/')[0]
if len(model_ids.split(',')) == 2:
return 0
point_ids = filename.split('/')[-2]
if len(point_ids.split(',')) == 2:
return 0
return 1
################################
# Camera Pose Helper functions #
################################
def parse_fixated_filename( filename ):
"""
Fixated filename is stored in similar format as single filename, but with multiple views
Return a list of filenames that have the root directory specified by root_dir
Parameters:
-----------
filename: filename in the specific format
Returns:
-----------
full_paths: a list of full path to camera pose info for the point-view pair
"""
root, domain, model_id, point_id, num_views = parse_filename( filename )
view_ids = num_views.split(',')
new_domain = "fixatedpose"
domain = "points"
full_paths = []
for view_id in view_ids:
filename = 'point_{point_id}_view_{view_id}_domain_{domain}.json'.format(
point_id=point_id,
view_id=view_id,
domain=new_domain)
full_path = os.path.join(root, model_id, domain, filename)
full_paths.append(full_path)
return full_paths
def parse_nonfixated_filename( filename ):
"""
Nonfixated filename is stored in the format:
'/{ROOT}/{MODEL_ID}/{POINT_IDS}/{VIEW_IDS}'
POINT_IDS and VIEW_IDS are lists that are separated by comma.
Return a list of filenames that have the root directory specified by root_dir
Parameters:
-----------
filename: filename in the specific format
Returns:
-----------
full_paths: a list of full path to camera pose info for the point-view pair
"""
root, domain, model_id, num_points, num_views = parse_filename( filename )
point_ids = num_points.split(',')
view_ids = num_views.split(',')
domain = "points"
new_domain = "fixatedpose"
full_path = []
for i in range(len(point_ids)):
filename = 'point_{point_id}_view_{view_id}_domain_{domain}.json'.format(
point_id=point_ids[i],
view_id=view_ids[i],
domain=new_domain)
full_path_i = os.path.join(root, model_id, domain, filename)
full_path.append(full_path_i)
return full_path
def calculate_relative_camera_location(full_path1, full_path2):
"""
Given two file paths to two json files, extract the 'camera_location'
field and calculate the relative camera location (translation)
Parameters:
__________
full_path1, full_path2: paths to json information
Returns:
__________
translation: the difference between the two camera locations
"""
assert os.path.isfile(full_path1) and os.path.isfile(full_path2)
with open(full_path1, 'r') as fp:
data1 = json.load(fp)
with open(full_path2, 'r') as fp:
data2 = json.load(fp)
key = ['camera_location', 'camera_rotation_final']
location1 = data1[key[0]]
location2 = data2[key[0]]
translation = np.asarray(location1) - np.asarray(location2)
return translation
def calculate_relative_camera_pose(full_path1, full_path2, fixated=True, raw=False):
"""
Given two file paths to two json files, extract the 'camera_location'
and 'camera_rotation_final' fields, and calculate the relative camera pose
Parameters:
__________
full_path1, full_path2: paths to json information
Returns:
__________
camera_poses: vector that encodes the relative camera pose for the two images
"""
assert os.path.isfile(full_path1) and os.path.isfile(full_path2)
with open(full_path1, 'r') as fp:
data1 = json.load(fp)
with open(full_path2, 'r') as fp:
data2 = json.load(fp)
key = ['camera_location', 'camera_rotation_final']
location1 = np.asarray(data1[key[0]])
rotation1 = data1[key[1]]
matrix1 = euler.euler2mat(*rotation1, axes='sxyz')
location2 = np.asarray(data2[key[0]])
rotation2 = data2[key[1]]
matrix2 = euler.euler2mat(*rotation2, axes='sxyz')
relative_rotation_matrix = np.matmul(np.transpose( matrix2 ), matrix1)
relative_rotation = euler.mat2euler(relative_rotation_matrix, axes='sxyz')
translation = np.matmul(np.transpose(matrix2), location1 - location2)
pose = np.hstack((relative_rotation, translation))
if not raw:
if fixated:
std = np.asarray([ 10.12015407, 8.1103528, 1.09171896, 1.21579016, 0.26040945, 10.05966329])
mean = np.asarray([ -2.67375523e-01, -1.19147040e-02, 1.14497274e-02, 1.10903410e-03, 2.10509948e-02, -4.02013549e+00])
else:
mean = np.asarray([ -9.53197445e-03, -1.05196691e-03, -1.07545642e-02,
2.08785638e-02, -9.27858049e-02, -2.58052205e+00])
std = np.asarray([ 1.02316223, 0.66477511, 1.03806996, 5.75692889, 1.37604962,
7.43157247])
pose = (pose - mean)/std
return pose
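# Sanity check (raw=True, both rotations zero): matrix1 = matrix2 = I, so
# relative_rotation = (0, 0, 0) and translation = location1 - location2.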
########################################
# Fixated and Non-fixated Camera Pose #
########################################
def nonfixated_camera_pose( filename ):
"""
Return the relative 6DOF camera pose between two images of nonfixated view.
Filename is in the format:
'/{PATH_TO_DATA_ROOT}/{MODEL_ID}/{domain}
/point_{POINT_ID}_view_{VIEW_ID}_domain_{DOMAIN_NAME}.png'
Parameters:
----------
filename: a filename that embodies what point we are examining
Returns:
-----------
camera_poses: vector that encodes the relative camera pose for the two images
"""
if isinstance(filename, list):
raise ValueError("Having more than two inputs to a fixated camera pose problem")
full_paths = parse_nonfixated_filename( filename )
if len(full_paths) != 2:
raise ValueError(
"camera pose should have filename with 2 point-view, {filename}".format(filename=filename))
pose = calculate_relative_camera_pose(full_paths[0], full_paths[1], fixated=False)
return pose
def nonfixated_camera_rot( filename ):
"""
Return the relative 6DOF camera pose between two images of nonfixated view.
Filename is in the format:
'/{PATH_TO_DATA_ROOT}/{MODEL_ID}/{domain}
/point_{POINT_ID}_view_{VIEW_ID}_domain_{DOMAIN_NAME}.png'
Parameters:
----------
filename: a filename that embodies what point we are examining
Returns:
-----------
camera_poses: vector that encodes the relative camera pose for the two images
"""
if isinstance(filename, list):
raise ValueError("Having more than two inputs to a fixated camera pose problem")
full_paths = parse_nonfixated_filename( filename )
if len(full_paths) != 2:
raise ValueError(
"camera pose should have filename with 2 point-view, {filename}".format(filename=filename))
pose = calculate_relative_camera_pose(full_paths[0], full_paths[1], fixated=False)
rot = pose[:3]
return rot
def fixated_camera_pose( filename ):
"""
Return the relative 6DOF camera pose between two images of fixated view.
Filename is in the format:
'/{PATH_TO_DATA_ROOT}/{MODEL_ID}/{domain}
/point_{POINT_ID}_view_{VIEW_ID}_domain_{DOMAIN_NAME}.png'
Parameters:
----------
filename: a filename that embodies what point we are examining
Returns:
-----------
camera_poses: vector that encodes the relative camera pose for the two images
"""
if isinstance(filename, list):
raise ValueError("Having more than two inputs to a fixated camera pose problem")
full_paths = parse_fixated_filename(filename)
if len(full_paths) != 2:
raise ValueError(
"camera pose should have filename with 2 point-view, {filename}".format(filename=filename))
pose = calculate_relative_camera_pose(full_paths[0], full_paths[1])
return pose
def fixated_camera_rot( filename ):
"""
Return the relative 6DOF camera pose between two images of fixated view.
Filename is in the format:
'/{PATH_TO_DATA_ROOT}/{MODEL_ID}/{domain}
/point_{POINT_ID}_view_{VIEW_ID}_domain_{DOMAIN_NAME}.png'
Parameters:
----------
filename: a filename that embodies what point we are examining
Returns:
-----------
camera_poses: vector that encodes the relative camera pose for the two images
"""
if isinstance(filename, list):
raise ValueError("Having more than two inputs to a fixated camera pose problem")
full_paths = parse_fixated_filename(filename)
if len(full_paths) != 2:
raise ValueError(
"camera pose should have filename with 2 point-view, {filename}".format(filename=filename))
pose = calculate_relative_camera_pose(full_paths[0], full_paths[1])
rot = pose[:3]
return rot
#################
# Ego-Motion #
#################
def triplet_fixated_egomotion( filename ):
"""
Given a filename that contains 3 different point-view combos, parse the filename
and return the pair-wise camera pose.
Parameters:
-----------
filename: a filename in the specific format.
Returns:
-----------
egomotion: a numpy array of length 18 (3x6).
(a concatenation of three 6-DOF relative camera pose vectors)
"""
if isinstance(filename, list):
raise ValueError("Having more than two inputs to a fixated camera pose problem")
full_paths = parse_fixated_filename(filename)
if len(full_paths) != 3:
raise ValueError("triplet egomotion requires a filename with exactly 3 point-view pairs")
# perm = range(3)
# random.shuffle(perm)
#full_paths = [full_paths[i] for i in perm]
poses = []
for i in range(2):
for j in range(i+1, 3):
pose = calculate_relative_camera_pose(full_paths[i], full_paths[j])
poses.append(pose)
poses = np.hstack(poses)
return poses
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import pytest
import numpy as np
from cntk import *
def test_outputs():
fwd_state = placeholder("placeholder")
prev_state = past_value(fwd_state, name="prev_state")
z = abs(prev_state, "abs")
output = z.output
z = z.replace_placeholders({fwd_state: z.output})
fwd_state = None
prev_state = None
z = None
for arg in output.owner.arguments:
print("Argument name: {}, argument owner name {}".format(arg.name, arg.owner.name))
def test_0d_data_1d_sample_shape():
x = input(shape=(1,))
op = x + x
with pytest.raises(ValueError):
op.eval({x : [np.asarray(2)]})
import random
import time
import datetime
import os
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix, accuracy_score, balanced_accuracy_score
from sklearn.utils.multiclass import unique_labels
from visdom import Visdom
from torch.autograd import Variable
import torch
def gan2gaze(tensor, mean, std):
mean = mean[np.newaxis, ..., np.newaxis, np.newaxis] # (1, nc, 1, 1)
mean = np.tile(mean, (tensor.size()[0], 1, tensor.size()[2], tensor.size()[3])) # (B, nc, H, W)
mean = torch.from_numpy(mean.astype(np.float32)).cuda()
std = std[np.newaxis, ..., np.newaxis, np.newaxis] # (1, nc, 1, 1)
std = np.tile(std, (tensor.size()[0], 1, tensor.size()[2], tensor.size()[3])) # (B, nc, H, W)
std = torch.from_numpy(std.astype(np.float32)).cuda()
return (tensor*0.5+0.5 - mean)/std
def gaze2gan(tensor, mean, std):
mean = mean[np.newaxis, ..., np.newaxis, np.newaxis] # (1, nc, 1, 1)
mean = np.tile(mean, (tensor.size()[0], 1, tensor.size()[2], tensor.size()[3])) # (B, nc, H, W)
mean = torch.from_numpy(mean.astype(np.float32)).cuda()
std = std[np.newaxis, ..., np.newaxis, np.newaxis] # (1, nc, 1, 1)
std = np.tile(std, (tensor.size()[0], 1, tensor.size()[2], tensor.size()[3])) # (B, nc, H, W)
std = torch.from_numpy(std.astype(np.float32)).cuda()
return (tensor*std+mean - 0.5)/0.5
def tensor2image(tensor, mean, std):
mean = mean[..., np.newaxis, np.newaxis] # (nc, 1, 1)
mean = np.tile(mean, (1, tensor.size()[2], tensor.size()[3])) # (nc, H, W)
std = std[..., np.newaxis, np.newaxis] # (nc, 1, 1)
std = np.tile(std, (1, tensor.size()[2], tensor.size()[3])) # (nc, H, W)
image = 255.0*(std*tensor[0].cpu().float().numpy() + mean) # (nc, H, W)
if image.shape[0] == 1:
image = np.tile(image, (3, 1, 1))
return image.astype(np.uint8) # (3, H, W)
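# Example (sketch): recover a displayable uint8 image from the first element
# of a normalized batch, given the per-channel mean/std used in preprocessing.
# img = tensor2image(batch, mean=np.array([0.5, 0.5, 0.5]), std=np.array([0.5, 0.5, 0.5]))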
class Logger():
def __init__(self, n_epochs, batches_epoch, mean=0.0, std=1.0):
self.viz = Visdom()
self.n_epochs = n_epochs
self.batches_epoch = batches_epoch
self.epoch = 1
self.batch = 1
self.prev_time = time.time()
self.mean_period = 0
self.losses = {}
self.loss_windows = {}
self.image_windows = {}
self.mean = mean
self.std = std
def log(self, losses=None, images=None):
self.mean_period += (time.time() - self.prev_time)
self.prev_time = time.time()
for i, loss_name in enumerate(losses.keys()):
if loss_name not in self.losses:
self.losses[loss_name] = losses[loss_name].item()
else:
self.losses[loss_name] += losses[loss_name].item()
batches_done = self.batches_epoch*(self.epoch - 1) + self.batch
batches_left = self.batches_epoch*(self.n_epochs - self.epoch) + self.batches_epoch - self.batch
# Draw images
for image_name, tensor in images.items():
if image_name not in self.image_windows:
self.image_windows[image_name] = self.viz.image(tensor2image(tensor.data, self.mean, self.std), opts={'title':image_name})
else:
self.viz.image(tensor2image(tensor.data, self.mean, self.std), win=self.image_windows[image_name], opts={'title':image_name})
# End of epoch
if (self.batch % self.batches_epoch) == 0:
# Plot losses
for loss_name, loss in self.losses.items():
if loss_name not in self.loss_windows:
self.loss_windows[loss_name] = self.viz.line(X=np.array([self.epoch]), Y=np.array([loss/self.batch]),
opts={'xlabel': 'epochs', 'ylabel': loss_name, 'title': loss_name})
else:
self.viz.line(X=np.array([self.epoch]), Y=np.array([loss/self.batch]), win=self.loss_windows[loss_name], update='append')
# Reset losses for next epoch
self.losses[loss_name] = 0.0
self.epoch += 1
self.batch = 1
#sys.stdout.write('\n')
else:
self.batch += 1
class ReplayBuffer():
def __init__(self, max_size=50):
assert (max_size > 0), 'Empty buffer or trying to create a black hole. Be careful.'
self.max_size = max_size
self.data = []
def push_and_pop(self, data):
to_return = []
for element in data.data:
element = torch.unsqueeze(element, 0)
if len(self.data) < self.max_size:
self.data.append(element)
to_return.append(element)
else:
if random.uniform(0,1) > 0.5:
i = random.randint(0, self.max_size-1)
to_return.append(self.data[i].clone())
self.data[i] = element
else:
to_return.append(element)
return Variable(torch.cat(to_return))
class LambdaLR():
def __init__(self, n_epochs, offset, decay_start_epoch):
assert ((n_epochs - decay_start_epoch) > 0), "Decay must start before the training session ends!"
self.n_epochs = n_epochs
self.offset = offset
self.decay_start_epoch = decay_start_epoch
def step(self, epoch):
return 1.0 - max(0, epoch + self.offset - self.decay_start_epoch)/(self.n_epochs - self.decay_start_epoch)
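# Typical usage (sketch; assumes an existing optimizer): plug .step into a
# PyTorch LambdaLR scheduler so the learning rate decays linearly to zero.
# scheduler = torch.optim.lr_scheduler.LambdaLR(
#     optimizer, lr_lambda=LambdaLR(n_epochs=200, offset=0, decay_start_epoch=100).step)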
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
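# Typical usage: net.apply(weights_init_normal) initializes every Conv/BatchNorm2d
# submodule of a model in place.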
def plot_confusion_matrix(y_true, y_pred, classes, output_dir=None, normalize=True, title=None, cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# merge "Eyes Closed" and "Lap" classes
y_true[y_true == 4] = 0
y_pred[y_pred == 4] = 0
# change GT "Shoulder" to "Left Mirror"
y_true[np.logical_and(y_true == 2, y_pred == 3)] = 3
# change GT "Shoulder" to "Right Mirror"
y_true[np.logical_and(y_true == 2, y_pred == 8)] = 8
# change prediction "Shoulder" to "Left Mirror"
y_pred[np.logical_and(y_pred == 2, y_true == 3)] = 3
# change prediction "Shoulder" to "Right Mirror"
y_pred[np.logical_and(y_pred == 2, y_true == 8)] = 8
# remove "Shoulder" class
retain = np.logical_and(y_pred != 2, y_true != 2)
y_true = y_true[retain]
y_pred = y_pred[retain]
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries (standard continuation
# of the scikit-learn confusion-matrix recipe)
xticklabels=classes, yticklabels=classes,
title=title, ylabel='True label', xlabel='Predicted label')
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 14 14:33:11 2016
@author: lewisli
"""
import numpy as np
import matplotlib.pyplot as plt
class DataSet(object):
def __init__(self, images, labels=None):
"""Construct a DataSet for use with TensorFlow
Args:
images: 3D np array containing (2D) images.
labels: labels corresponding to images (optional)
"""
self._num_dims = images.ndim - 1
self._num_examples = images.shape[self._num_dims]
self._num_rows = images.shape[0]
self._num_cols = images.shape[1]
# Check to see if labels is set
if labels is None:
self._supervised = False
labels = np.zeros(self._num_examples)
else:
assert self._num_examples == labels.shape[0], (
'images.shape: %s labels.shape: %s' % (images.shape,
labels.shape))
self._supervised = True
# Convert shape from [rows, columns, num_examples]
# to [num examples,rows*columns,]
images = images.reshape(self._num_rows*self._num_cols, self._num_examples)
# Do we need to normalize images???
images = images.astype(np.float32).transpose()
images = (images-images.min())/(images.max() - images.min())
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch (standard reconstruction of this next_batch idiom)
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
import numpy as np
import cv2 as cv
from Data_Augmentation.image_transformer import ImageTransformer
from Data_Augmentation.utility import getTheBoundRect
import sys
import random
padding=50
class SampImgModifier:
def __init__(self,image,size,lower,upper,bgcolor):
self.height=size[0]+padding*2
self.width=size[1]+padding*2
self.channels=size[2]
self.image = bgcolor* np.ones((self.height,self.width,self.channels),np.uint8)
self.image[padding:(self.height-padding),padding:(self.width-padding)]=np.copy(image[0:size[0],0:size[1]])
self.modifiedFlag=0
self.lower=lower
self.upper=upper
self.maskImage=cv.inRange(self.image,lower,upper)
self.modifiedImg=np.copy(self.image)
def addGaussianNoise(self,noiseMean,noiseVariance):
noiseSigma = noiseVariance ** 0.5
foregrndPix = np.where(self.maskImage == 0)
# Reconstructed continuation (assumed): add zero-mean Gaussian noise to the
# foreground pixels only and mark the image as modified.
gaussNoise = np.random.normal(noiseMean, noiseSigma, (self.height, self.width, self.channels))
self.modifiedImg = np.float64(self.modifiedImg)
self.modifiedImg[foregrndPix] = self.modifiedImg[foregrndPix] + gaussNoise[foregrndPix]
self.modifiedFlag = 1
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cmath
import numpy as np
import pytest
import cirq
from cirq.linalg import matrix_commutes
def test_is_diagonal():
assert cirq.is_diagonal(np.empty((0, 0)))
assert cirq.is_diagonal(np.empty((1, 0)))
assert cirq.is_diagonal(np.empty((0, 1)))
assert cirq.is_diagonal(np.array([[1]]))
assert cirq.is_diagonal(np.array([[-1]]))
assert cirq.is_diagonal(np.array([[5]]))
assert cirq.is_diagonal(np.array([[3j]]))
assert cirq.is_diagonal(np.array([[1, 0]]))
assert cirq.is_diagonal(np.array([[1], [0]]))
assert not cirq.is_diagonal(np.array([[1, 1]]))
assert not cirq.is_diagonal(np.array([[1], [1]]))
assert cirq.is_diagonal(np.array([[5j, 0], [0, 2]]))
assert cirq.is_diagonal(np.array([[1, 0], [0, 1]]))
assert not cirq.is_diagonal(np.array([[1, 0], [1, 1]]))
assert not cirq.is_diagonal(np.array([[1, 1], [0, 1]]))
assert not cirq.is_diagonal(np.array([[1, 1], [1, 1]]))
assert not cirq.is_diagonal(np.array([[1, 0.1], [0.1, 1]]))
assert cirq.is_diagonal(np.array([[1, 1e-11], [1e-10, 1]]))
def test_is_diagonal_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_diagonal(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_diagonal(np.array([[1, 0], [-0.6, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_diagonal(np.array([[1, 0.5], [-0.5, 1]]), atol=atol)
assert not cirq.is_diagonal(np.array([[1, 0.5], [-0.6, 1]]), atol=atol)
def test_is_hermitian():
assert cirq.is_hermitian(np.empty((0, 0)))
assert not cirq.is_hermitian(np.empty((1, 0)))
assert not cirq.is_hermitian(np.empty((0, 1)))
assert cirq.is_hermitian(np.array([[1]]))
assert cirq.is_hermitian(np.array([[-1]]))
assert cirq.is_hermitian(np.array([[5]]))
assert not cirq.is_hermitian(np.array([[3j]]))
assert not cirq.is_hermitian(np.array([[0, 0]]))
assert not cirq.is_hermitian(np.array([[0], [0]]))
assert not cirq.is_hermitian(np.array([[5j, 0], [0, 2]]))
assert cirq.is_hermitian(np.array([[5, 0], [0, 2]]))
assert cirq.is_hermitian(np.array([[1, 0], [0, 1]]))
assert not cirq.is_hermitian(np.array([[1, 0], [1, 1]]))
assert not cirq.is_hermitian(np.array([[1, 1], [0, 1]]))
assert cirq.is_hermitian(np.array([[1, 1], [1, 1]]))
assert cirq.is_hermitian(np.array([[1, 1j], [-1j, 1]]))
assert cirq.is_hermitian(np.array([[1, 1j], [-1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_hermitian(np.array([[1, 1j], [1j, 1]]))
assert not cirq.is_hermitian(np.array([[1, 0.1], [-0.1, 1]]))
assert cirq.is_hermitian(np.array([[1, 1j + 1e-11], [-1j, 1 + 1j * 1e-9]]))
def test_is_hermitian_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_hermitian(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert cirq.is_hermitian(np.array([[1, 0.25], [-0.25, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0], [-0.6, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0.25], [-0.35, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_hermitian(np.array([[1, 0.5, 0.5], [0, 1, 0], [0, 0, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0.5, 0.6], [0, 1, 0], [0, 0, 1]]), atol=atol)
assert not cirq.is_hermitian(np.array([[1, 0, 0.6], [0, 1, 0], [0, 0, 1]]), atol=atol)
def test_is_unitary():
assert cirq.is_unitary(np.empty((0, 0)))
assert not cirq.is_unitary(np.empty((1, 0)))
assert not cirq.is_unitary(np.empty((0, 1)))
assert cirq.is_unitary(np.array([[1]]))
assert cirq.is_unitary(np.array([[-1]]))
assert cirq.is_unitary(np.array([[1j]]))
assert not cirq.is_unitary(np.array([[5]]))
assert not cirq.is_unitary(np.array([[3j]]))
assert not cirq.is_unitary(np.array([[1, 0]]))
assert not cirq.is_unitary(np.array([[1], [0]]))
assert not cirq.is_unitary(np.array([[1, 0], [0, -2]]))
assert cirq.is_unitary(np.array([[1, 0], [0, -1]]))
assert cirq.is_unitary(np.array([[1j, 0], [0, 1]]))
assert not cirq.is_unitary(np.array([[1, 0], [1, 1]]))
assert not cirq.is_unitary(np.array([[1, 1], [0, 1]]))
assert not cirq.is_unitary(np.array([[1, 1], [1, 1]]))
assert not cirq.is_unitary(np.array([[1, -1], [1, 1]]))
assert cirq.is_unitary(np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert cirq.is_unitary(np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_unitary(np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert cirq.is_unitary(np.array([[1, 1j + 1e-11], [1j, 1 + 1j * 1e-9]]) * np.sqrt(0.5))
def test_is_unitary_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_unitary(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_unitary(np.array([[1, 0], [-0.6, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_unitary(np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), atol=atol)
assert not cirq.is_unitary(np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1.2]]), atol=atol)
def test_is_orthogonal():
assert cirq.is_orthogonal(np.empty((0, 0)))
assert not cirq.is_orthogonal(np.empty((1, 0)))
assert not cirq.is_orthogonal(np.empty((0, 1)))
assert cirq.is_orthogonal(np.array([[1]]))
assert cirq.is_orthogonal(np.array([[-1]]))
assert not cirq.is_orthogonal(np.array([[1j]]))
assert not cirq.is_orthogonal(np.array([[5]]))
assert not cirq.is_orthogonal(np.array([[3j]]))
assert not cirq.is_orthogonal(np.array([[1, 0]]))
assert not cirq.is_orthogonal(np.array([[1], [0]]))
assert not cirq.is_orthogonal(np.array([[1, 0], [0, -2]]))
assert cirq.is_orthogonal(np.array([[1, 0], [0, -1]]))
assert not cirq.is_orthogonal(np.array([[1j, 0], [0, 1]]))
assert not cirq.is_orthogonal(np.array([[1, 0], [1, 1]]))
assert not cirq.is_orthogonal(np.array([[1, 1], [0, 1]]))
assert not cirq.is_orthogonal(np.array([[1, 1], [1, 1]]))
assert not cirq.is_orthogonal(np.array([[1, -1], [1, 1]]))
assert cirq.is_orthogonal(np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert not cirq.is_orthogonal(np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_orthogonal(np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert cirq.is_orthogonal(np.array([[1, 1e-11], [0, 1 + 1e-11]]))
def test_is_orthogonal_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_orthogonal(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_orthogonal(np.array([[1, 0], [-0.6, 1]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_orthogonal(np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), atol=atol)
assert not cirq.is_orthogonal(np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1.2]]), atol=atol)
def test_is_special_orthogonal():
assert cirq.is_special_orthogonal(np.empty((0, 0)))
assert not cirq.is_special_orthogonal(np.empty((1, 0)))
assert not cirq.is_special_orthogonal(np.empty((0, 1)))
assert cirq.is_special_orthogonal(np.array([[1]]))
assert not cirq.is_special_orthogonal(np.array([[-1]]))
assert not cirq.is_special_orthogonal(np.array([[1j]]))
assert not cirq.is_special_orthogonal(np.array([[5]]))
assert not cirq.is_special_orthogonal(np.array([[3j]]))
assert not cirq.is_special_orthogonal(np.array([[1, 0]]))
assert not cirq.is_special_orthogonal(np.array([[1], [0]]))
assert not cirq.is_special_orthogonal(np.array([[1, 0], [0, -2]]))
assert not cirq.is_special_orthogonal(np.array([[1, 0], [0, -1]]))
assert cirq.is_special_orthogonal(np.array([[-1, 0], [0, -1]]))
assert not cirq.is_special_orthogonal(np.array([[1j, 0], [0, 1]]))
assert not cirq.is_special_orthogonal(np.array([[1, 0], [1, 1]]))
assert not cirq.is_special_orthogonal(np.array([[1, 1], [0, 1]]))
assert not cirq.is_special_orthogonal(np.array([[1, 1], [1, 1]]))
assert not cirq.is_special_orthogonal(np.array([[1, -1], [1, 1]]))
assert cirq.is_special_orthogonal(np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert not cirq.is_special_orthogonal(np.array([[1, 1], [1, -1]]) * np.sqrt(0.5))
assert not cirq.is_special_orthogonal(np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_special_orthogonal(np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert cirq.is_special_orthogonal(np.array([[1, 1e-11], [0, 1 + 1e-11]]))
def test_is_special_orthogonal_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_special_orthogonal(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_special_orthogonal(np.array([[1, 0], [-0.6, 1]]), atol=atol)
# Error isn't accumulated across entries, except for determinant factors.
assert cirq.is_special_orthogonal(
np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1 / 1.2]]), atol=atol
)
assert not cirq.is_special_orthogonal(
np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), atol=atol
)
assert not cirq.is_special_orthogonal(
np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1 / 1.2]]), atol=atol
)
def test_is_special_unitary():
assert cirq.is_special_unitary(np.empty((0, 0)))
assert not cirq.is_special_unitary(np.empty((1, 0)))
assert not cirq.is_special_unitary(np.empty((0, 1)))
assert cirq.is_special_unitary(np.array([[1]]))
assert not cirq.is_special_unitary(np.array([[-1]]))
assert not cirq.is_special_unitary(np.array([[5]]))
assert not cirq.is_special_unitary(np.array([[3j]]))
assert not cirq.is_special_unitary(np.array([[1, 0], [0, -2]]))
assert not cirq.is_special_unitary(np.array([[1, 0], [0, -1]]))
assert cirq.is_special_unitary(np.array([[-1, 0], [0, -1]]))
assert not cirq.is_special_unitary(np.array([[1j, 0], [0, 1]]))
assert cirq.is_special_unitary(np.array([[1j, 0], [0, -1j]]))
assert not cirq.is_special_unitary(np.array([[1, 0], [1, 1]]))
assert not cirq.is_special_unitary(np.array([[1, 1], [0, 1]]))
assert not cirq.is_special_unitary(np.array([[1, 1], [1, 1]]))
assert not cirq.is_special_unitary(np.array([[1, -1], [1, 1]]))
assert cirq.is_special_unitary(np.array([[1, -1], [1, 1]]) * np.sqrt(0.5))
assert cirq.is_special_unitary(np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5))
assert not cirq.is_special_unitary(np.array([[1, -1j], [1j, 1]]) * np.sqrt(0.5))
assert cirq.is_special_unitary(np.array([[1, 1j + 1e-11], [1j, 1 + 1j * 1e-9]]) * np.sqrt(0.5))
def test_is_special_unitary_tolerance():
atol = 0.5
# Pays attention to specified tolerance.
assert cirq.is_special_unitary(np.array([[1, 0], [-0.5, 1]]), atol=atol)
assert not cirq.is_special_unitary(np.array([[1, 0], [-0.6, 1]]), atol=atol)
assert cirq.is_special_unitary(np.array([[1, 0], [0, 1]]) * cmath.exp(1j * 0.1), atol=atol)
assert not cirq.is_special_unitary(np.array([[1, 0], [0, 1]]) * cmath.exp(1j * 0.3), atol=atol)
# Error isn't accumulated across entries, except for determinant factors.
assert cirq.is_special_unitary(np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1 / 1.2]]), atol=atol)
assert not cirq.is_special_unitary(np.array([[1.2, 0, 0], [0, 1.2, 0], [0, 0, 1.2]]), atol=atol)
assert not cirq.is_special_unitary(
np.array([[1.2, 0, 0], [0, 1.3, 0], [0, 0, 1 / 1.2]]), atol=atol
)
def test_is_normal():
assert cirq.is_normal(np.array([[1]]))
assert cirq.is_normal(np.array([[3j]]))
assert cirq.is_normal(cirq.testing.random_density_matrix(4))
assert cirq.is_normal(cirq.testing.random_unitary(5))
assert not cirq.is_normal(np.array([[0, 1], [0, 0]]))
assert not cirq.is_normal(np.zeros((1, 0)))
def test_is_normal_tolerance():
atol = 0.25
# Pays attention to specified tolerance.
assert cirq.is_normal(np.array([[0, 0.5], [0, 0]]), atol=atol)
assert not cirq.is_normal(np.array([[0, 0.6], [0, 0]]), atol=atol)
# Error isn't accumulated across entries.
assert cirq.is_normal(np.array([[0, 0.5, 0], [0, 0, 0.5], [0, 0, 0]]), atol=atol)
assert not cirq.is_normal(np.array([[0, 0.5, 0], [0, 0, 0.6], [0, 0, 0]]), atol=atol)
def test_commutes():
assert matrix_commutes(np.empty((0, 0)), np.empty((0, 0)))
assert not matrix_commutes(np.empty((1, 0)), np.empty((0, 1)))
assert not matrix_commutes(np.empty((0, 1)), np.empty((1, 0)))
assert not matrix_commutes(np.empty((1, 0)), np.empty((1, 0)))
import streamlit as st
# To make things easier later, we're also importing numpy and pandas for
# working with sample data.
import numpy as np
import pandas as pd
from scipy.integrate import *
import scipy.optimize
import matplotlib.pyplot as plt
from functools import partial
import os, sys
st.sidebar.markdown("## Parameters used in the simulation")
st.sidebar.markdown("Enter your own custom values to run the model")
je = float(st.sidebar.text_input('Current density j_e [10^10 A/m^2]', 10))
periSampl = 1000
class Parameters:
gamma = 2.2128e5
alpha = float(st.sidebar.text_input('Gilbert damping constant', 1))
K1 = float(st.sidebar.text_input('Anisotropy constant K_1 [J/m^3]', 1.5 * 9100))
Js = float(st.sidebar.text_input('Saturation magnetization Js [T]', 0.65))
RAHE = float(st.sidebar.text_input('Anomalous Hall effect coefficient', 0.65))
d = float(st.sidebar.text_input('FM layer thickness [m]', (0.6+1.2+1.1) * 1e-9))
frequency = float(st.sidebar.text_input('AC frequency [Hz]', 0.1e9))
currentd = je * 1e10
hbar = 1.054571e-34
e = 1.602176634e-19
mu0 = 4 * 3.1415927 * 1e-7
easy_axis = np.array([0, 0, 1])
# <NAME>
# PSM d18O Ice Core
# ARCHIVE SUB-MODEL
# Modified 01/26/2015 <<EMAIL>>
# Modified 11/18/2015 <<EMAIL>>
# =======================================================================================
def diffusivity(rho,T=250,P=0.9,rho_d=822,b=1.3):
'''
DOCSTRING: Function 'diffusivity'
Description: Calculates diffusivity (in m^2/s) as a function of density.
Inputs:
P: Ambient Pressure in Atm
T: Temperature in K
rho: density profile (kg/m^3)
rho_d: 822 kg/m^3 [default], density at which ice becomes impermeable to diffusion
Defaults are available for all but rho, so only one argument need be entered.
Note values for diffusivity in air:
D16 = 2.1e-5*(T/273.15)^1.94*1/P
D18 = D16/1.0285
D2 = D16/1.0251
D17 = D16/((D16/D18)^0.518)
Reference: Johnsen et al. (2000): Diffusion of Stable isotopes in polar firn and ice:
the isotope effect in firn diffusion
'''
import numpy as np
import scipy
from scipy import integrate
import matplotlib.pyplot as plt
# Set Constants
R = 8.314478 # Gas constant
m = 18.02e-3 # molar weight of water (in kg)
alpha18 = np.exp(11.839/T-28.224e-3) # ice-vapor fractionation for oxygen 18
p=np.exp(9.5504+3.53*np.log(T)-5723.265/T-0.0073*T) # saturation vapor pressure
Po = 1. # reference pressure, atmospheres
ppa=3.454e12*np.exp(-6133/T)
rho_i = 920.# kg/m^3, density of solid ice
# Set diffusivity in air (units of m^2/s)
Da = 2.1e-5*np.power((T/273.15),1.94)*(Po/P)
Dai = Da/1.0285
# Calculate Tortuosity
invtau=np.zeros(len(rho))
for i in range(len(rho)):
if rho[i]<=rho_i/np.sqrt(b):
#invtau[i]=1.-1.3*np.power((rho[i]/rho_d),2)
invtau[i]=1.-1.3*np.power((rho[i]/rho_i),2)
else:
# Reconstructed (assumed): firn is impermeable above the critical density
invtau[i]=0.
import numpy as np
import schedule
import action
import formatter
# Creates an argumentation framework representing feasibility as an adjacency matrix
def construct_feasibility_framework(m, n):
ff = np.zeros((m, n, m, n), dtype=bool)
for j in range(n):
ff[:, j, :, j] = np.logical_not(np.identity(m))
return ff
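# Interpretation: ff[i1, j, i2, j] is True exactly when two distinct machines
# i1 != i2 are both candidates for the same job j, i.e. the two assignments
# attack each other.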
def construct_partial_feasibility_framework(m, n, i1, j):
ff = np.zeros((m, n), dtype=bool)
for i2 in range(m):
if i1 != i2:
ff[i2, j] = True
return ff
# Creates an efficiency framework from a feasibility framework
def construct_efficiency_framework(m, p, nfd, pfd, S, ff, copy, options):
if copy:
ef = np.copy(ff)
else:
ef = ff
C = schedule.calc_completion_times(p, S)
C_max = np.max(C) if m > 0 else 0
if m == 0:
return ef, C, C_max
M = range(m)
J = [np.flatnonzero(S[i,:]) for i in M]
# If feasible assignment (i1, j1)
i1 = np.argmax(C)
for j1 in J[i1]:
for i2 in M:
# Single exchange property
if C[i1] > C[i2] + p[j1] and (not options['fixed'] or not pfd[i1, j1] and not nfd[i2, j1]):
ef[i1, j1, i2, j1] = False
# If feasible assignment (i2, j2)
for j2 in J[i2]:
# Pairwise exchange property
if (i1 != i2 and j1 != j2 and
p[j1] > p[j2] and
C[i1] + p[j2] > C[i2] + p[j1] and
(not options['fixed'] or not pfd[i1, j1] and not pfd[i2, j2] and
not nfd[i2, j1] and not nfd[i1, j2])):
ef[i1, j1, i2, j2] = True
return ef, C, C_max
def construct_partial_efficiency_framework(m, p, nfd, pfd, S, C, C_max, i1, j1, options):
_, n = S.shape
ef = construct_partial_feasibility_framework(m, n, i1, j1)
J = [np.flatnonzero(S[i,:]) for i in range(m)]
# Assume i1 < m
if i1 == np.argmax(C):
for i2 in range(m):
# Single exchange property
if C[i1] > C[i2] + p[j1] and (not options['fixed'] or not pfd[i1, j1] and not nfd[i2, j1]):
ef[i2, j1] = False
# If feasible assignment (i2, j2)
for j2 in J[i2]:
# Pairwise exchange property
if (i1 != i2 and j1 != j2 and
p[j1] > p[j2] and
C[i1] + p[j2] > C[i2] + p[j1] and
(not options['fixed'] or not pfd[i1, j1] and not pfd[i2, j2] and
not nfd[i2, j1] and not nfd[i1, j2])):
ef[i2, j2] = True
return ef
# Creates a fixed decision framework from a feasibility framework
def construct_satisfaction_framework(nfd, pfd, ff, copy=True):
if copy:
df = np.copy(ff)
else:
df = ff
(m, n) = nfd.shape
for i in range(m):
for j in range(n):
if nfd[i, j]:
df[i, j, i, j] = True
if pfd[i, j]:
df[:, j, i, j] = False
return df
def construct_partial_satisfaction_framework(nfd, pfd, i1, j):
m, n = nfd.shape
df = construct_partial_feasibility_framework(m, n, i1, j)
if nfd[i1, j]:
df[i1, j] = True
for i2 in range(m):
if pfd[i2, j]:
df[i2, j] = False
return df
# Attempt to build arguments to explain why S is not a stable extension of f
def compute_unattacked(S, f, ignore_unattacked, precomputed=True):
m, n = S.shape
unattacked = np.logical_not(S)
for i in range(m):
for j in range(n):
if S[i, j]:
if precomputed:
f_partial = f[i, j, :, :]
else:
f_partial = f(i, j)
unattacked = np.logical_and(unattacked, np.logical_not(f_partial))
if not ignore_unattacked is None:
unattacked = np.logical_and(unattacked, np.logical_not(ignore_unattacked))
return unattacked
def compute_partial_conflicts(S, f, ignore_conflicts, i, j, precomputed=True):
m, n = S.shape
conflicts = np.zeros((m, n), dtype=bool)
if S[i, j]:
if precomputed:
f_partial = f[i, j]
else:
f_partial = f(i, j)
conflicts = np.logical_and(f_partial, S)
if not ignore_conflicts is None:
if precomputed:
ignore_conflicts_partial = ignore_conflicts[i, j]
else:
ignore_conflicts_partial = ignore_conflicts(i, j)
conflicts = np.logical_and(conflicts, np.logical_not(ignore_conflicts_partial))
return conflicts
def explain_stability(S, f, ignore_unattacked=None, ignore_conflicts=None):
unattacked = compute_unattacked(S, f, ignore_unattacked)
m, n = S.shape
conflicts = np.zeros((m, n, m, n), dtype=bool)
for i in range(m):
for j in range(n):
conflicts[i, j] = compute_partial_conflicts(S, f,
None if ignore_conflicts is None else ignore_conflicts,
i, j)
return unattacked, conflicts
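# Usage sketch: S is an m x n boolean assignment matrix and f the
# (m, n, m, n) attack framework; an all-False `unattacked` together with
# all-False `conflicts` certifies that S is a stable extension.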
# Compute reasons for feasibility using stability
def explain_feasibility(unattacked, conflicts, precomputed=True):
(m, n) = unattacked.shape
N = range(n)
if m == 0:
if n == 0:
return True, [('nomachinejob', [])]
else:
return False, [('nomachine', [])]
elif n == 0:
return True, [('nojob', [])]
# Summarise unallocated jobs
unallocated = unattacked[0]
# Summarise overallocated jobs
overallocated = np.zeros(n, dtype=bool)
job_conflicts = np.zeros((n, m), dtype=bool)
for j in N:
# Conflicts are symmetrical, count upper diagonal
for i1 in range(m):
if precomputed:
conflicts_partial = conflicts[i1, j]
else:
conflicts_partial = conflicts(i1, j)
for i2 in range(i1, m):
if conflicts_partial[i2, j]:
overallocated[j] = True
job_conflicts[j, i1] = True
job_conflicts[j, i2] = True
# Generate natural language explanations
if np.any(unallocated) or np.any(overallocated):
# Explain unallocated
reasons = [('unallocated', [j]) for j in range(n) if unallocated[j]]
# Explain overallocations
reasons += [('overallocated', [list(np.flatnonzero(job_conflicts[j]))])
for j in N if overallocated[j]]
return False, reasons
# Reconstructed (assumed): with no unallocated or overallocated jobs the
# schedule is feasible.
return True, [('feasible', [])]
import pickle
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from .utils import I, Z
from .utils import rx
class Layerwise:
def __init__(self, G):
self.graph = G
self.num_qubits = len(G.nodes)
self.edge_list = list(G.edges)
self.hamiltonian = self.get_maxcut_hmt()
self.maxcut = np.max(self.hamiltonian)
self.best_ansatz = self.plusxn()
self.best_params = None
self.exp_arr = None
self.npts = None
self.gmesh = None
self.bmesh = None
self.max_exps = None
self.depth = 0
def tensor_prod(self, u3, qubits):
if 0 in qubits:
ans = u3
else:
ans = I
for idx in range(1, self.num_qubits):
if idx in qubits:
ans = np.kron(ans, u3)
else:
ans = np.kron(ans, I)
return ans
def plusxn(self):
N = 2 ** self.num_qubits
return np.ones((N, 1))/np.sqrt(N)
def get_maxcut_hmt(self):
N = 2 ** self.num_qubits
ans = np.zeros((N, N))
for u, v in self.edge_list:
ans += np.eye(N) - self.tensor_prod(Z, [u, v])
return ans/2
def ehz(self, gamma):
eigs = np.diag(self.hamiltonian)
return np.diag(np.exp(1j*gamma/2*eigs))
def ehx(self, beta):
return self.tensor_prod(rx(2*beta), list(range(self.num_qubits)))
def ansatz(self, gamma, beta):
ans = self.best_ansatz[:, -1][:, np.newaxis]
return self.ehx(beta) @ self.ehz(gamma) @ ans
def expectation(self, gamma, beta):
right = self.ansatz(gamma, beta)
left = right.conj().T
return (left @ self.hamiltonian @ right).real
def create_grid(self, npts, gmin=0, gmax=2*np.pi, bmin=0, bmax=np.pi):
grange = np.linspace(gmin, gmax, npts)
brange = np.linspace(bmin, bmax, npts)
gmesh, bmesh = np.meshgrid(grange, brange)
gg = gmesh.reshape((-1,))
bb = bmesh.reshape((-1,))
exp_arr = np.array(list(map(self.expectation, gg, bb)))\
.reshape((npts, npts))
self.npts = npts
self.gmesh = gmesh
self.bmesh = bmesh
if self.exp_arr is None:
self.exp_arr = exp_arr[:, :, np.newaxis]
else:
self.exp_arr = np.dstack((self.exp_arr, exp_arr))
# def get_max(self, p):
# if self.exp_arr is None:
# raise ValueError('Grid not found. Run create_grid() method first.')
# exp_arr = self.exp_arr[:, :, p-1]
# max_exp = np.max(exp_arr)
# whr = np.where(np.isclose(exp_arr, max_exp))
# indices = zip(whr[0], whr[1])
# angle_list = [(self.gmesh[idx], self.bmesh[idx]) for idx in indices]
# return (max_exp, angle_list)
def find_args(self, p, value):
""" Find the nearest args given a value. """
if self.exp_arr is None:
raise ValueError('Grid not found. Run create_grid() method first.')
dist_arr = np.abs(self.exp_arr[:, :, p-1] - value)
nearest = np.min(dist_arr)
whr = np.where(np.isclose(dist_arr, nearest))
indices = zip(whr[0], whr[1])
angle_list = [(self.gmesh[idx], self.bmesh[idx]) for idx in indices]
return angle_list
def run(self, p_end, npts=50, cutoff=1.0):
for i in range(1, p_end + 1):
print(f'Creating grid for p={i}')
self.create_grid(npts)
max_exp = np.max(self.exp_arr[:, :, i-1])
best_params = self.find_args(i, cutoff * max_exp)[0] # take only one pair of angles
if self.max_exps is None:
self.max_exps = max_exp
else:
self.max_exps = np.hstack((self.max_exps, max_exp))
if self.best_params is None:
self.best_params = np.array(best_params)
else:
self.best_params = np.vstack((self.best_params, best_params))
# Reconstructed (assumed): grow the stored ansatz with the newly optimized
# layer so the next iteration builds on it, and record the depth.
self.best_ansatz = np.hstack((self.best_ansatz, self.ansatz(*best_params)))
self.depth = i
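# Example usage (sketch; assumes networkx is installed):
# import networkx as nx
# qaoa = Layerwise(nx.cycle_graph(4))
# qaoa.run(p_end=2, npts=30)
# print(qaoa.max_exps, qaoa.best_params)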
"""
Created on 2019. 6. 17.
@author: YJHeo
"""
"""
Edited on 2020. 7. 2.
@author: gwkim
"""
from threading import Lock
from ctypes import *
import json
import numpy as np
import os.path
import sys
import socket
from applications.etc.util import PrintMsg
debugging = False
###############################################################################
# Robot S/W version #
###############################################################################
SW_Version = "v2.3.1" # 2020. 01. 14.
#########################################################################
# Global indicator #
#########################################################################
GLOBAL_DICT = {"stop": True, "pause": False}
# GLOBAL_STOP = True
###############################################################################
# Global variables #
###############################################################################
JOINT_DOF = 6
###############################################################################
# Robot Interface #
###############################################################################
SIZE_HEADER = 52
SIZE_COMMAND = 4
SIZE_HEADER_COMMAND = 56
SIZE_DATA_TCP_MAX = 200
SIZE_DATA_MAX = 200
SIZE_DATA_ASCII_MAX = 32
SIZE_PACKET = 256
###############################################################################
# Robot Type #
###############################################################################
ROBOT_INDY7 = "NRMK-Indy7"
ROBOT_INDYRP2 = "NRMK-IndyRP2"
ROBOT_INDY12 = "NRMK-Indy12"
# Deprecated
ROBOT_INDYRP = "NRMK-IndyRP"
ROBOT_INDY3 = "NRMK-Indy3"
ROBOT_INDY5 = "NRMK-Indy5"
ROBOT_INDY10 = "NRMK-Indy10"
ROBOT_INDY15 = "NRMK-Indy15"
ROBOT_OPTI5 = "NRMK-OPTi5"
ROBOT_OPTI10 = "NRMK-OPTi10"
###############################################################################
# C-type Data #
###############################################################################
class HeaderCommandStruct(Structure):
_pack_ = 1
_fields_ = [
("robotName", c_ubyte * 20),
("robotVersion", c_ubyte * 12),
("stepInfo", c_ubyte),
("sof", c_ubyte),
("invokeId", c_uint32),
("dataSize", c_uint32),
("status", c_uint32),
("reserved", c_ubyte * 6),
("cmdId", c_uint32),
]
class HeaderCommand(Union):
_fields_ = [("byte", c_ubyte * SIZE_DATA_TCP_MAX), ("val", HeaderCommandStruct)]
class Data(Union):
_fields_ = [
("byte", c_ubyte * SIZE_DATA_TCP_MAX),
("asciiStr", c_ubyte * (SIZE_DATA_ASCII_MAX + 1)),
("str", c_ubyte * 200),
("charVal", c_ubyte),
("boolVal", c_byte),
("shortVal", c_uint16),
("intVal", c_int32),
("floatVal", c_float),
("doubleVal", c_double),
("byteVal", c_ubyte),
("wordVal", c_ubyte * 2),
("uwordVal", c_ubyte * 2),
("dwordVal", c_ubyte * 4),
("lwordVal", c_ubyte * 8),
("bool6dArr", c_ubyte * 6),
("bool7dArr", c_ubyte * 7),
("boolArr", c_ubyte * 200),
("char2dArr", c_ubyte * 2),
("char3dArr", c_ubyte * 3),
("char6dArr", c_ubyte * 6),
("char7dArr", c_ubyte * 7),
("charArr", c_ubyte * 200),
("int2dArr", c_int32 * 2),
("int3dArr", c_int32 * 3),
("int6dArr", c_int32 * 6),
("int7dArr", c_int32 * 7),
("intArr", c_int32 * 50),
("float3dArr", c_float * 3),
("float6dArr", c_float * 6),
("float7dArr", c_float * 7),
("floatArr", c_float * 50),
("double3dArr", c_double * 3),
("double6dArr", c_double * 6),
("double7dArr", c_double * 7),
("doubleArr", c_double * 50),
("byteArr", c_ubyte * 200),
("wordArr", c_ubyte * 2 * 100),
("uwordArr", c_ubyte * 2 * 100),
("dwordArr", c_ubyte * 4 * 50),
("lwordArr", c_ubyte * 8 * 25),
]
class Packet(Union):
_fields_ = [("header", HeaderCommand), ("data", Data)]
class RobotStatus(Structure):
_fields_ = [
("is_robot_running", c_ubyte),
("is_robot_ready", c_ubyte),
("is_emergency_stop", c_ubyte),
("is_collided", c_ubyte),
("is_error_state", c_ubyte),
("is_busy", c_ubyte),
("is_move_finished", c_ubyte),
("is_home", c_ubyte),
("is_zero", c_ubyte),
("is_in_resetting", c_ubyte),
("is_direct_teaching_mode", c_ubyte),
("is_teaching_mode", c_ubyte),
("is_program_running", c_ubyte),
("is_program_paused", c_ubyte),
("is_conty_connected", c_ubyte),
]
class DIO(Structure):
_fields_ = [("channel", c_uint32), ("value", c_ubyte)]
#########################################################################
# Command #
#########################################################################
CMD_CHECK = 0
CMD_EMERGENCY_STOP = 1
CMD_RESET_ROBOT = 2
CMD_SET_SERVO = 3
CMD_SET_BRAKE = 4
CMD_STOP = 5
CMD_MOVE = 6
CMD_MOVE_HOME = 7
CMD_MOVE_ZERO = 8
CMD_JOINT_MOVE_TO = 9
CMD_JOINT_MOVE_BY = 10
CMD_TASK_MOVE_TO = 11
CMD_TASK_MOVE_BY = 12
CMD_START_CURRENT_PROGRAM = 14
CMD_PAUSE_CURRENT_PROGRAM = 15
CMD_RESUME_CURRENT_PROGRAM = 16
CMD_STOP_CURRENT_PROGRAM = 17
CMD_START_DEFAULT_PROGRAM = 18
CMD_REGISTER_DEFAULT_PROGRAM_IDX = 19
CMD_GET_REGISTERED_DEFAULT_PROGRAM_IDX = 20
CMD_IS_ROBOT_RUNNING = 30
CMD_IS_READY = 31
CMD_IS_EMG = 32
CMD_IS_COLLIDED = 33
CMD_IS_ERR = 34
CMD_IS_BUSY = 35
CMD_IS_MOVE_FINISEHD = 36
CMD_IS_HOME = 37
CMD_IS_ZERO = 38
CMD_IS_IN_RESETTING = 39
CMD_IS_DIRECT_TECAHING = 60
CMD_IS_TEACHING = 61
CMD_IS_PROGRAM_RUNNING = 62
CMD_IS_PROGRAM_PAUSED = 63
CMD_IS_CONTY_CONNECTED = 64
CMD_CHANGE_DIRECT_TEACHING = 80
CMD_FINISH_DIRECT_TEACHING = 81
CMD_JOINT_PUSH_BACK_WAYPOINT_SET = 90
CMD_JOINT_POP_BACK_WAYPOINT_SET = 91
CMD_JOINT_CLEAR_WAYPOINT_SET = 92
CMD_JOINT_EXECUTE_WAYPOINT_SET = 94
CMD_TASK_PUSH_BACK_WAYPOINT_SET = 95
CMD_TASK_POP_BACK_WAYPOINT_SET = 96
CMD_TASK_CLEAR_WAYPOINT_SET = 97
CMD_TASK_EXECUTE_WAYPOINT_SET = 99
CMD_SET_DEFAULT_TCP = 100
CMD_RESET_DEFAULT_TCP = 101
CMD_SET_COMP_TCP = 102
CMD_RESET_COMP_TCP = 103
CMD_SET_REFFRAME = 104
CMD_RESET_REFFRAME = 105
CMD_SET_COLLISION_LEVEL = 106
CMD_SET_JOINT_BOUNDARY = 107
CMD_SET_TASK_BOUNDARY = 108
CMD_SET_JOINT_WTIME = 111
CMD_SET_TASK_WTIME = 112
CMD_SET_TASK_CMODE = 113
CMD_SET_JOINT_BLEND_RADIUS = 116
CMD_SET_TASK_BLEND_RADIUS = 117
CMD_SET_REDUCED_MODE = 130 # not released
CMD_SET_REDUCED_SPEED_RATIO = 131 # not released
CMD_GET_REDUCED_MODE = 230 # not released
CMD_GET_REDUCED_SPEED_RATIO = 231 # not released
CMD_GET_DEFAULT_TCP = 200
CMD_GET_COMP_TCP = 201
CMD_GET_REFFRAME = 202
CMD_GET_COLLISION_LEVEL = 203
CMD_GET_JOINT_BOUNDARY = 204
CMD_GET_TASK_BOUNDARY = 205
CMD_GET_JOINT_WTIME = 208
CMD_GET_TASK_WTIME = 209
CMD_GET_TASK_CMODE = 210
CMD_GET_JOINT_BLEND_RADIUS = 213
CMD_GET_TASK_BLEND_RADIUS = 214
CMD_GET_RUNNING_TIME = 300
CMD_GET_CMODE = 301
CMD_GET_JOINT_STATE = 302
CMD_GET_JOINT_POSITION = 320
CMD_GET_JOINT_VELOCITY = 321
CMD_GET_TASK_POSITION = 322
CMD_GET_TASK_VELOCITY = 323
CMD_GET_TORQUE = 324
CMD_GET_INV_KIN = 325
CMD_GET_LAST_EMG_INFO = 380
CMD_GET_SMART_DI = 400
CMD_GET_SMART_DIS = 401
CMD_SET_SMART_DO = 402
CMD_SET_SMART_DOS = 403
CMD_GET_SMART_AI = 404
CMD_SET_SMART_AO = 405
CMD_GET_SMART_DO = 406
CMD_GET_SMART_DOS = 407
CMD_GET_SMART_AO = 408
CMD_SET_ENDTOOL_DO = 409 # v2.3.1 added
CMD_GET_ENDTOOL_DO = 410 # v2.3.1 added
CMD_GET_EXTIO_FTCAN_ROBOT_RAW = 420
CMD_GET_EXTIO_FTCAN_ROBOT_TRANS = 421
CMD_GET_EXTIO_FTCAN_CB_RAW = 422
CMD_GET_EXTIO_FTCAN_CB_TRANS = 423
CMD_READ_DIRECT_VARIABLE = 460
CMD_READ_DIRECT_VARIABLES = 461
CMD_WRITE_DIRECT_VARIABLE = 462
CMD_WRITE_DIRECT_VARIABLES = 463
CMD_SET_SYNC_MODE = 700
CMD_FOR_EXTENDED = 800
CMD_FOR_STREAMING = 801
CMD_SEND_KEYCOMMAND = 9996
CMD_READ_MEMORY = 9997
CMD_WRITE_MEMORY = 9998
CMD_ERROR = 9999
#########################################################################
# Extended DCP command #
#########################################################################
EXT_CMD_MOVE_TRAJ_BY_DATA = 1
EXT_CMD_MOVE_TRAJ_BY_TXT_DATA = 2
EXT_CMD_MOVE_TRAJ_BY_FILE = 3
EXT_CMD_MOVE_TRAJ_BY_TXT_FILE = 4
EXT_CMD_JMOVE_ABS_WAYPOINT_SET = 11
EXT_CMD_TMOVE_ABS_WAYPOINT_SET = 12
EXT_CMD_SET_JSON_PROG = 21
EXT_CMD_SET_JSON_PROG_START = 22
#########################################################################
# Error code #
#########################################################################
ERR_NONE = 0
ERR_NO_MATCHED_ROBOT = 1
ERR_NO_MATCHED_STEP = 2
ERR_HEADER_FORMAT = 4
ERR_OVER_DATA_SIZE = 5
ERR_NOT_SUPPORT_COMMAND = 6
ERR_UNKNOWN_COMMAND = 7
ERR_UNKNOWN_DATA = 8
ERR_PROCESS_FAILED = 9
ERR_PARSE_FAILED = 10
ERR_NO_MATCHED_PARAMETER = 11
ERR_NO_MATCHED_DATA_SIZE = 12
ERR_WRONG_ASCII_FORMAT = 13
ERR_ROBOT_MOVING_STATE = 14
ERR_ROBOT_PROGRAM_RUNNING = 15
ERR_ROBOT_MOVE_FAILED = 16
ERR_NO_DEFAULT_PROGRAM = 17
ERR_NO_CURRENT_PROGRAM = 18
ERR_CURRENT_PROGRAM_STATE = 19
ERR_EMG_STATE = 20
ERR_ROBOT_STATE = 21
ERR_ROBOT_PROGRAM_LOAD_FAILED = 22
ERR_DIRECT_VARIABLE_INVALID_ADDRESS = 23
ERR_DIRECT_VARIABLE_INVALID_FORMAT = 24
ERR_DIRECT_VARIABLE_REFNUM_LIMIT = 25
ERR_CONNECTION_EXCEPTION = 600
ERR_CONNECTION_TIMEOUT = 601
def err_to_string(err_cmd):
return {
ERR_NONE: "ErrorCode {}: No Error".format(err_cmd),
ERR_NO_MATCHED_ROBOT: "ErrorCode {}: Not matched robot".format(err_cmd),
ERR_NO_MATCHED_STEP: "ErrorCode {}: Not matched step".format(err_cmd),
ERR_HEADER_FORMAT: "ErrorCode {}: Invalid header format".format(err_cmd),
ERR_OVER_DATA_SIZE: "ErrorCode {}: Over data size".format(err_cmd),
ERR_NOT_SUPPORT_COMMAND: "ErrorCode {}: Unsupported command".format(err_cmd),
ERR_UNKNOWN_COMMAND: "ErrorCode {}: Unknown command".format(err_cmd),
ERR_UNKNOWN_DATA: "ErrorCode {}: Unknown data".format(err_cmd),
ERR_PROCESS_FAILED: "ErrorCode {}: Process fail".format(err_cmd),
ERR_PARSE_FAILED: "ErrorCode {}: Parsing fail (data error)".format(err_cmd),
ERR_NO_MATCHED_PARAMETER: "ErrorCode {}: Not matched data type".format(err_cmd),
ERR_NO_MATCHED_DATA_SIZE: "ErrorCode {}: Not matched data size ".format(
err_cmd
),
# ERR_WRONG_ASCII_FORMAT: "ErrorCode {}: ".format(err_cmd),
ERR_ROBOT_MOVING_STATE: "ErrorCode {}: Robot is moving".format(err_cmd),
ERR_ROBOT_PROGRAM_RUNNING: "ErrorCode {}: Robot program is running".format(
err_cmd
),
ERR_ROBOT_MOVE_FAILED: "ErrorCode {}: Move fail".format(err_cmd),
ERR_NO_DEFAULT_PROGRAM: "ErrorCode {}: No default program".format(err_cmd),
ERR_NO_CURRENT_PROGRAM: "ErrorCode {}: No loaded program".format(err_cmd),
ERR_CURRENT_PROGRAM_STATE: "ErrorCode {}: No proper program state".format(
err_cmd
),
ERR_EMG_STATE: "ErrorCode {}: Robot is emergency state".format(err_cmd),
ERR_ROBOT_STATE: "ErrorCode {}: Not proper robot state".format(err_cmd),
ERR_ROBOT_PROGRAM_LOAD_FAILED: "ErrorCode {}: Program load fail".format(
err_cmd
),
ERR_DIRECT_VARIABLE_INVALID_ADDRESS: "ErrorCode {}: Invalid direct variable address".format(
err_cmd
),
ERR_DIRECT_VARIABLE_INVALID_FORMAT: "ErrorCode {}: Invalid direct variable format".format(
err_cmd
),
ERR_DIRECT_VARIABLE_REFNUM_LIMIT: "ErrorCode {}: Limit of direct variable size".format(
err_cmd
),
}.get(err_cmd, "None")
#########################################################################
# Header Status Bit #
#########################################################################
# 0b 1000 0000 0000 0000 0000 0000 0000 0000
HEADER_STATUS_BIT_TASK_RUNNING = 0x80000000
# 0b 0100 0000 0000 0000 0000 0000 0000 0000
HEADER_STATUS_BIT_ROBOT_READY = 0x40000000
# 0b 0010 0000 0000 0000 0000 0000 0000 0000
HEADER_STATUS_BIT_EMG_STOPPED = 0x20000000
# 0b 0001 0000 0000 0000 0000 0000 0000 0000
HEADER_STATUS_BIT_COLLIDED = 0x10000000
# 0b 0000 1000 0000 0000 0000 0000 0000 0000
HEADER_STATUS_BIT_ERR_STATE = 0x08000000
HEADER_STATUS_BIT_BUSY = 0x04000000 # 0b 0000 0100 0000 0000 0000 0000 0000 0000
# 0b 0000 0010 0000 0000 0000 0000 0000 0000
HEADER_STATUS_BIT_MOVE_FINISHED = 0x02000000
HEADER_STATUS_BIT_HOME = 0x01000000 # 0b 0000 0001 0000 0000 0000 0000 0000 0000
HEADER_STATUS_BIT_ZERO = 0x00800000 # 0b 0000 0000 1000 0000 0000 0000 0000 0000
# 0b 0000 0000 0100 0000 0000 0000 0000 0000
HEADER_STATUS_BIT_IN_RESETTING = 0x00400000
# 0b 0000 0000 0000 0000 0000 0000 1000 0000
HEADER_STATUS_BIT_DIRECT_TEACHING = 0x00000080
# 0b 0000 0000 0000 0000 0000 0000 0100 0000
HEADER_STATUS_BIT_TEACHING = 0x00000040
# 0b 0000 0000 0000 0000 0000 0000 0010 0000
HEADER_STATUS_BIT_PROGRAM_RUNNING = 0x00000020
# 0b 0000 0000 0000 0000 0000 0000 0001 0000
HEADER_STATUS_BIT_PROGRAM_PAUSED = 0x00000010
# 0b 0000 0000 0000 0000 0000 0000 0000 1000
HEADER_STATUS_BIT_CONTY_CONNECTED = 0x00000008
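# The flags above can be tested directly with bitwise AND instead of string
# slicing. A minimal sketch (the helper name is illustrative, not part of the
# protocol):
#
#     def status_flag(status, mask):
#         return bool(status & mask)
#
#     # e.g. status_flag(header_status, HEADER_STATUS_BIT_ROBOT_READY)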
#########################################################################
# DirectVariableType #
#########################################################################
DIRECT_VAR_TYPE_ERROR = -1
DIRECT_VAR_TYPE_BYTE = 0
DIRECT_VAR_TYPE_WORD = 1
DIRECT_VAR_TYPE_DWORD = 2
DIRECT_VAR_TYPE_LWORD = 3
DIRECT_VAR_TYPE_FLOAT = 4
DIRECT_VAR_TYPE_DFLOAT = 5
DIRECT_VAR_TYPE_MODBUS_REG = 10
###############################################################################
# Debug #
###############################################################################
def dump_buf(msg, buf, length):
if debugging:
PrintMsg.print_error(msg)
for i in range(0, length):
# PrintMsg.print_error(i, end=' - ')
PrintMsg.print_error(buf[i])
###############################################################################
# Decorators #
###############################################################################
def socket_connect(func):
def decorated(*args, **kwargs):
args[0].lock.acquire()
# args[0].connect()
func_out = func(*args, **kwargs)
# args[0].disconnect()
args[0].lock.release()
return func_out
return decorated
# gwkim
def tcp_command(cmd, response_type=None):
def decorate(func):
def decorated(*args):
global JOINT_DOF
_req_data = func(*args)
if _req_data is None:
error_code, _res_data, _ = args[0]._handle_command(cmd)
else:
error_code, _res_data, _ = args[0]._handle_command(
cmd, _req_data[0], _req_data[1]
)
if error_code:
return error_code
if response_type == "jointArr":
if JOINT_DOF == 6:
return np.array(_res_data.double6dArr).tolist()
else:
return np.array(_res_data.double7dArr).tolist()
elif response_type is not None:
                return np.array(getattr(_res_data, response_type)).tolist()  # getattr instead of eval on an attribute name
else:
return None
return decorated
return decorate
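# Usage sketch: a decorated method only builds the request payload (or returns
# None for commands without one); tcp_command sends it via _handle_command and
# unpacks the response field named by response_type. For instance, get_cmode
# further below simply declares CMD_GET_CMODE with an "intVal" response and
# passes.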
###############################################################################
# Indy Client Class #
###############################################################################
class IndyDCPClient:
def __init__(self, server_ip, robot_name, robot_version=""):
global JOINT_DOF
self.__server_port = 6066
self.__sof_server = 0x12
self.__sof_client = 0x34
self.__step_ver = 0x02
self.__lock = Lock()
self.lock = self.__lock
self.sock_fd = socket.socket()
self.time_out = 10
self.v_invokeId = 0
self.server_ip = server_ip
self.robot_name = robot_name
self.robot_version = robot_version
JOINT_DOF = 7 if self.robot_name == ROBOT_INDYRP2 else 6
self.robot_status = RobotStatus()
def connect(self):
# self.__lock.acquire()
self.sock_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.sock_fd.connect((self.server_ip, self.__server_port))
except socket.error as e:
PrintMsg.print_error("Socket connection error: {}".format(e))
self.sock_fd.close()
# self.__lock.release()
return False
        else:
            PrintMsg.print_error(
                "Connect: Server IP ({ser_ip})".format(ser_ip=self.server_ip)
            )
            # self.__lock.release()
            return True
def disconnect(self):
self.sock_fd.close()
# self.__lock.release()
def shutdown(self):
self.sock_fd.shutdown(socket.SHUT_RDWR)
PrintMsg.print_error("Shut down")
def set_timeout_sec(self, time_out):
if time_out < 0:
PrintMsg.print_error("Invalid time out setting: {}<0".format(time_out))
self.time_out = time_out
def _send_message(self, buf, size):
dump_buf("SendBytes: ", buf, size)
total_sent = 0
while total_sent < size:
self.sock_fd.settimeout(self.time_out)
sent = self.sock_fd.send(buf[total_sent:size])
if sent == -1:
PrintMsg.print_error("Error: sent == -1")
return -1
elif sent == 0:
# self.__lock.release()
PrintMsg.print_error("Error: sent == 0")
return -1
total_sent = total_sent + sent
return 0
def _recv_message(self, buf, size):
chunks = []
bytes_recd = 0
while bytes_recd < size:
self.sock_fd.settimeout(self.time_out)
chunk = self.sock_fd.recv(size - bytes_recd)
if chunk == b"":
PrintMsg.print_error("Error: receive error")
memset(buf, 0, sizeof(buf))
# self.__lock.release()
self.shutdown()
return -1
chunks.append(chunk)
if (bytes_recd + len(chunk)) > sizeof(buf):
break
bytes_recd += len(chunk)
data = b"".join(chunks)
memset(buf, 0, sizeof(buf))
memmove(buf, data, len(data))
return buf
def check_header(self, req=HeaderCommand(), res=HeaderCommand(), err_code=ERR_NONE):
        req_robot_name = np.array(req.val.robotName).tobytes().decode("utf-8")
        res_robot_name = np.array(res.val.robotName).tobytes().decode("utf-8")
if req_robot_name != res_robot_name:
PrintMsg.print_error(
"Header check fail (robotName): Request {_req}, Response {_res}".format(
_req=req_robot_name, _res=res_robot_name
)
)
if req.val.stepInfo != res.val.stepInfo:
PrintMsg.print_error(
"Header check fail (stepInfo): Request {_req}, Response {_res}".format(
_req=req.val.stepInfo, _res=res.val.stepInfo
)
)
if req.val.invokeId != res.val.invokeId:
PrintMsg.print_error(
"Header check fail (invokeId): Request {_req}, Response {_res}".format(
_req=req.val.invokeId, _res=res.val.invokeId
)
)
if res.val.sof != self.__sof_server:
PrintMsg.print_error(
"Header check fail (sof): Request {_req}, Response {_res}".format(
_req=self.__sof_server, _res=res.val.sof
)
)
if req.val.cmdId != res.val.cmdId:
PrintMsg.print_error(
"Header check fail (cmdId): Request {_req}, Response {_res}".format(
_req=req.val.cmdId, _res=res.val.cmdId
)
)
if res.val.cmdId == CMD_ERROR:
PrintMsg.print_error(err_to_string(err_code))
return err_code
return ERR_NONE
    def parse_robot_status(self, status):
        # Zero-pad to all 32 bits: bin(status).lstrip("0b") would also strip
        # leading zero bits and shift every index below.
        status_str = format(status, "032b")
        # self.robot_status.is_robot_running = int(status_str[0])
self.robot_status.is_robot_ready = int(status_str[1])
self.robot_status.is_emergency_stop = int(status_str[2])
self.robot_status.is_collided = int(status_str[3])
self.robot_status.is_error_state = int(status_str[4])
self.robot_status.is_busy = int(status_str[5])
self.robot_status.is_move_finished = int(status_str[6])
self.robot_status.is_home = int(status_str[7])
self.robot_status.is_zero = int(status_str[8])
self.robot_status.is_in_resetting = int(status_str[9])
self.robot_status.is_teaching_mode = int(status_str[25])
self.robot_status.is_direct_teaching_mode = int(status_str[24])
self.robot_status.is_program_running = int(status_str[26])
self.robot_status.is_program_paused = int(status_str[27])
self.robot_status.is_conty_connected = int(status_str[28])
@socket_connect
def _handle_command(self, cmd, req_data=Data(), req_data_size=0):
write_buffer = (c_char * 1024)()
read_buffer = (c_char * 1024)()
if req_data_size > SIZE_DATA_TCP_MAX or req_data_size < 0:
self.disconnect()
raise Exception(
"Request size is invalid {}: Disconnected".format(req_data_size)
)
# Make header
req_header = HeaderCommand()
memset(req_header.byte, 0, sizeof(req_header.byte))
b_str_robot_name = self.robot_name.encode("ascii")
memmove(
req_header.val.robotName, c_char_p(b_str_robot_name), len(self.robot_name)
)
b_str_robot_ver = self.robot_version.encode("ascii")
memmove(
req_header.val.robotVersion,
c_char_p(b_str_robot_ver),
len(self.robot_version),
)
req_header.val.stepInfo = self.__step_ver
req_header.val.sof = self.__sof_client
req_header.val.cmdId = cmd
req_header.val.dataSize = req_data_size
self.v_invokeId += 1
req_header.val.invokeId = self.v_invokeId
# Send packet to socket
memmove(write_buffer, req_header.byte, SIZE_HEADER_COMMAND)
self._send_message(write_buffer, SIZE_HEADER_COMMAND)
if req_data_size > 0:
if hasattr(req_data, "byte"):
memmove(write_buffer, req_data.byte, req_data_size)
else:
# For execute command move
memmove(write_buffer, req_data, req_data_size)
self._send_message(write_buffer, req_data_size)
# Recv header from socket
res_header = HeaderCommand()
read_buffer = self._recv_message(read_buffer, SIZE_HEADER_COMMAND)
memmove(res_header.byte, read_buffer, SIZE_HEADER_COMMAND)
# Recv data from socket
res_data = Data()
res_data_size = res_header.val.dataSize
if res_data_size > SIZE_DATA_TCP_MAX or res_data_size < 0:
PrintMsg.print_error(
"Response data size is invalid {} (max: {}): Disconnected".format(
res_data_size, SIZE_DATA_TCP_MAX
)
)
self.disconnect()
elif res_data_size > 0:
read_buffer = self._recv_message(read_buffer, res_data_size)
memmove(res_data.byte, read_buffer, res_data_size)
# Check header and error
error_code = self.check_header(req_header, res_header, res_data.intVal)
# Get robot status from header's reserved field
self.parse_robot_status(res_header.val.status)
return error_code, res_data, res_data_size
@socket_connect
def _handle_extended_command(self, ext_cmd, req_ext_data, req_ext_data_size=0):
ret = False
write_buffer = (c_char * 1024)()
read_buffer = (c_char * 1024)()
if req_ext_data_size > sys.maxsize or req_ext_data_size < 0:
self.disconnect()
PrintMsg.print_error("Send data size error")
if req_ext_data_size > 0 and req_ext_data is None:
PrintMsg.print_error("Send data error: Null data")
# Make request header
req_header = HeaderCommand()
memset(req_header.byte, 0, sizeof(req_header.byte))
b_str_robot_name = self.robot_name.encode("ascii")
memmove(
req_header.val.robotName, c_char_p(b_str_robot_name), len(self.robot_name)
)
b_str_robot_ver = self.robot_version.encode("ascii")
memmove(
req_header.val.robotVersion,
c_char_p(b_str_robot_ver),
len(self.robot_version),
)
req_header.val.stepInfo = self.__step_ver
req_header.val.sof = self.__sof_client
req_header.val.cmdId = CMD_FOR_EXTENDED
req_header.val.dataSize = 8
self.v_invokeId += 1
req_header.val.invokeId = self.v_invokeId
# Make request data
req_data = Data()
req_data.int2dArr[0] = np.array(ext_cmd)
req_data.int2dArr[1] = np.array(req_ext_data_size)
req_data_size = req_header.val.dataSize
# Send packet to socket
memmove(write_buffer, req_header.byte, SIZE_HEADER_COMMAND)
self._send_message(write_buffer, SIZE_HEADER_COMMAND)
memmove(write_buffer, req_data.byte, req_data_size)
self._send_message(write_buffer, req_data_size)
# Send extended packet to socket
if req_ext_data_size > 0:
self._send_message(req_ext_data, req_ext_data_size)
# Recv header from socket
res_header = HeaderCommand()
read_buffer = self._recv_message(read_buffer, SIZE_HEADER_COMMAND)
memmove(res_header.byte, read_buffer, SIZE_HEADER_COMMAND)
# Recv data from socket
res_data = Data()
res_data_size = res_header.val.dataSize
if res_data_size > SIZE_DATA_TCP_MAX or res_data_size < 0:
self.disconnect()
elif res_data_size > 0:
read_buffer = self._recv_message(read_buffer, res_data_size)
memmove(res_data.byte, read_buffer, res_data_size)
# Check header and error
        ret = self.check_header(req_header, res_header, res_data.intVal)
# Recv extended data from socket
res_ext_data = Data()
res_ext_data_size = res_data.int2dArr[1]
if res_ext_data_size < 0 or res_ext_data_size > sys.maxsize:
self.disconnect()
PrintMsg.print_error("Recv data error: size")
        elif res_data.int2dArr[0] != ext_cmd:  # "is not" compared identity, not value
self.disconnect()
PrintMsg.print_error(
"Recv data error: ext_cmd {}/{}".format(res_data.int2dArr[0], ext_cmd)
)
if res_ext_data_size > 0:
self._recv_message(res_ext_data, res_ext_data_size)
        if ret:
            # check_header returned a non-zero error code
            return ret
        else:
            return ret, res_data, res_data_size
# gwkim
############################################################################
## Robot command function (Check all) #
############################################################################
@tcp_command(CMD_CHECK)
def check(self):
pass
# Get robot status
def get_robot_status(self):
self.check()
res = {
"ready": self.robot_status.is_robot_ready,
"emergency": self.robot_status.is_emergency_stop,
"collision": self.robot_status.is_collided,
"error": self.robot_status.is_error_state,
"busy": self.robot_status.is_busy,
"movedone": self.robot_status.is_move_finished,
"home": self.robot_status.is_home,
"zero": self.robot_status.is_zero,
"resetting": self.robot_status.is_in_resetting,
"teaching": self.robot_status.is_teaching_mode,
"direct_teaching": self.robot_status.is_direct_teaching_mode,
}
return res
@tcp_command(CMD_IS_CONTY_CONNECTED, "boolVal")
def is_conty_connected(self):
pass
# Program state
def get_program_state(self):
self.check()
res = {
"running": self.robot_status.is_program_running,
"pause": self.robot_status.is_program_paused,
}
return res
# Reset/Stop
@tcp_command(CMD_EMERGENCY_STOP)
def stop_emergency(self):
pass
@tcp_command(CMD_RESET_ROBOT)
def reset_robot(self):
pass
# Joint/Servo command
@tcp_command(CMD_SET_SERVO)
def set_servo(self, arr):
data = Data()
data_size = JOINT_DOF
for i in range(JOINT_DOF):
data.bool6dArr[i] = arr[i]
return (data, data_size)
@tcp_command(CMD_SET_BRAKE)
def set_brake(self, arr):
data = Data()
data_size = JOINT_DOF
for i in range(JOINT_DOF):
data.bool6dArr[i] = arr[i]
return (data, data_size)
def direct_teaching(self, mode):
if mode:
self._handle_command(CMD_CHANGE_DIRECT_TEACHING)
else:
self._handle_command(CMD_FINISH_DIRECT_TEACHING)
# Set global robot variables
@tcp_command(CMD_SET_DEFAULT_TCP)
def set_default_tcp(self, tcp):
data = Data()
data_size = 6 * 8
        for i in range(6):  # a TCP pose is always 6 doubles (data_size = 6 * 8)
data.double6dArr[i] = tcp[i]
return (data, data_size)
@tcp_command(CMD_RESET_DEFAULT_TCP)
def reset_default_tcp(self):
pass
@tcp_command(CMD_SET_COMP_TCP)
def set_tcp_comp(self, tcp):
data = Data()
data_size = 6 * 8
        for i in range(6):
data.double6dArr[i] = tcp[i]
return (data, data_size)
@tcp_command(CMD_RESET_COMP_TCP)
def reset_tcp_compensation(self):
pass
@tcp_command(CMD_SET_REFFRAME)
def set_reference_frame(self, ref):
data = Data()
data_size = 6 * 8
        for i in range(6):
data.double6dArr[i] = ref[i]
return (data, data_size)
@tcp_command(CMD_RESET_REFFRAME)
def reset_reference_frame(self):
pass
@tcp_command(CMD_SET_COLLISION_LEVEL)
def set_collision_level(self, level):
data = Data()
data_size = 4
data.intVal = level
return (data, data_size)
@tcp_command(CMD_SET_JOINT_BOUNDARY)
def set_joint_vel_level(self, level):
data = Data()
data_size = 4
data.intVal = level
return (data, data_size)
@tcp_command(CMD_SET_TASK_BOUNDARY)
def set_task_vel_level(self, level):
data = Data()
data_size = 4
data.intVal = level
return (data, data_size)
@tcp_command(CMD_SET_JOINT_WTIME)
def set_joint_waypoint_time(self, wp_time):
data = Data()
data_size = 8
data.doubleVal = wp_time
return (data, data_size)
@tcp_command(CMD_SET_TASK_WTIME)
def set_task_waypoint_time(self, wp_time):
data = Data()
data_size = 8
data.doubleVal = wp_time
return (data, data_size)
@tcp_command(CMD_SET_TASK_CMODE)
def set_task_base(self, mode):
# 0: reference frame, 1: TCO
data = Data()
data_size = 4
data.intVal = mode
return (data, data_size)
@tcp_command(CMD_SET_JOINT_BLEND_RADIUS)
def set_joint_blend_radius(self, radius):
data = Data()
data_size = 8
data.doubleVal = radius
return (data, data_size)
@tcp_command(CMD_SET_TASK_BLEND_RADIUS)
def set_task_blend_radius(self, radius):
data = Data()
data_size = 8
data.doubleVal = radius
return (data, data_size)
# Get global robot variables
@tcp_command(CMD_GET_DEFAULT_TCP, "double6dArr")
def get_default_tcp(self):
pass
@tcp_command(CMD_GET_COMP_TCP, "double6dArr")
def get_tcp_comp(self):
pass
@tcp_command(CMD_GET_REFFRAME, "double6dArr")
def get_reference_frame(self):
pass
@tcp_command(CMD_GET_COLLISION_LEVEL, "intVal")
def get_collision_level(self):
pass
@tcp_command(CMD_GET_JOINT_BOUNDARY, "intVal")
def get_joint_vel_level(self):
pass
@tcp_command(CMD_GET_TASK_BOUNDARY, "intVal")
def get_task_vel_level(self):
pass
@tcp_command(CMD_GET_JOINT_WTIME, "doubleVal")
def get_joint_waypoint_time(self):
pass
@tcp_command(CMD_GET_TASK_WTIME, "doubleVal")
def get_task_waypoint_time(self):
pass
@tcp_command(CMD_GET_TASK_CMODE, "intVal")
def get_task_base(self):
pass
@tcp_command(CMD_GET_JOINT_BLEND_RADIUS, "doubleVal")
def get_joint_blend_radius(self):
pass
@tcp_command(CMD_GET_TASK_BLEND_RADIUS, "doubleVal")
def get_task_blend_radius(self):
pass
@tcp_command(CMD_GET_RUNNING_TIME, "doubleVal")
def get_robot_running_time(self):
pass
@tcp_command(CMD_GET_CMODE, "intVal")
def get_cmode(self):
pass
def get_servo_state(self):
error_code, _res_data, _res_data_size = self._handle_command(
CMD_GET_JOINT_STATE
)
if error_code:
return error_code
        result = np.array(_res_data.charArr)
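# Minimal connection sketch (the IP address is a placeholder; ROBOT_INDYRP2 is
# the 7-DOF name referenced above, other robot-name constants are assumed to
# be defined elsewhere in this module):
#
#     client = IndyDCPClient("192.168.0.10", ROBOT_INDYRP2)
#     if client.connect():
#         print(client.get_robot_status())
#         client.disconnect()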
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib import cm, colors
from astropy.modeling import models, fitting
# Reading in all data files at once
import glob
path_normal ='/projects/p30137/ageller/testing/EBLSST/add_m5/output_files'
allFiles_normal = glob.glob(path_normal + "/*.csv")
path_fast = '/projects/p30137/ageller/testing/EBLSST/add_m5/fast/old/output_files'
allFiles_fast = glob.glob(path_fast + "/*.csv")
path_obsDist = '/projects/p30137/ageller/testing/EBLSST/add_m5/fast/old/obsDist/output_files'
allFiles_obsDist = glob.glob(path_obsDist + "/*.csv")
N_totalnormal_array = []
N_totalobservablenormal_array = []
N_totalrecoverablenormal_array = []
N_totalnormal_array_03 = []
N_totalobservablenormal_array_03 = []
N_totalrecoverablenormal_array_03 = []
N_totalnormal_array_1 = []
N_totalobservablenormal_array_1 = []
N_totalrecoverablenormal_array_1 = []
N_totalnormal_array_10 = []
N_totalobservablenormal_array_10 = []
N_totalrecoverablenormal_array_10 = []
N_totalnormal_array_30 = []
N_totalobservablenormal_array_30 = []
N_totalrecoverablenormal_array_30 = []
N_totalnormal_array_100 = []
N_totalobservablenormal_array_100 = []
N_totalrecoverablenormal_array_100 = []
N_totalnormal_array_1000 = []
N_totalobservablenormal_array_1000 = []
N_totalrecoverablenormal_array_1000 = []
N_totalnormal22_array = []
N_totalobservablenormal22_array = []
N_totalrecoverablenormal22_array = []
N_totalnormal22_array_03 = []
N_totalobservablenormal22_array_03 = []
N_totalrecoverablenormal22_array_03 = []
N_totalnormal22_array_1 = []
N_totalobservablenormal22_array_1 = []
N_totalrecoverablenormal22_array_1 = []
N_totalnormal22_array_10 = []
N_totalobservablenormal22_array_10 = []
N_totalrecoverablenormal22_array_10 = []
N_totalnormal22_array_30 = []
N_totalobservablenormal22_array_30 = []
N_totalrecoverablenormal22_array_30 = []
N_totalnormal22_array_100 = []
N_totalobservablenormal22_array_100 = []
N_totalrecoverablenormal22_array_100 = []
N_totalnormal22_array_1000 = []
N_totalobservablenormal22_array_1000 = []
N_totalrecoverablenormal22_array_1000 = []
N_totalnormal195_array = []
N_totalobservablenormal195_array = []
N_totalrecoverablenormal195_array = []
N_totalnormal195_array_03 = []
N_totalobservablenormal195_array_03 = []
N_totalrecoverablenormal195_array_03 = []
N_totalnormal195_array_1 = []
N_totalobservablenormal195_array_1 = []
N_totalrecoverablenormal195_array_1 = []
N_totalnormal195_array_10 = []
N_totalobservablenormal195_array_10 = []
N_totalrecoverablenormal195_array_10 = []
N_totalnormal195_array_30 = []
N_totalobservablenormal195_array_30 = []
N_totalrecoverablenormal195_array_30 = []
N_totalnormal195_array_100 = []
N_totalobservablenormal195_array_100 = []
N_totalrecoverablenormal195_array_100 = []
N_totalnormal195_array_1000 = []
N_totalobservablenormal195_array_1000 = []
N_totalrecoverablenormal195_array_1000 = []
N_totalfast_array = []
N_totalobservablefast_array = []
N_totalrecoverablefast_array = []
N_totalfast_array_03 = []
N_totalobservablefast_array_03 = []
N_totalrecoverablefast_array_03 = []
N_totalfast_array_1 = []
N_totalobservablefast_array_1 = []
N_totalrecoverablefast_array_1 = []
N_totalfast_array_10 = []
N_totalobservablefast_array_10 = []
N_totalrecoverablefast_array_10 = []
N_totalfast_array_30 = []
N_totalobservablefast_array_30 = []
N_totalrecoverablefast_array_30 = []
N_totalfast_array_100 = []
N_totalobservablefast_array_100 = []
N_totalrecoverablefast_array_100 = []
N_totalfast_array_1000 = []
N_totalobservablefast_array_1000 = []
N_totalrecoverablefast_array_1000 = []
N_totalfast22_array = []
N_totalobservablefast22_array = []
N_totalrecoverablefast22_array = []
N_totalfast22_array_03 = []
N_totalobservablefast22_array_03 = []
N_totalrecoverablefast22_array_03 = []
N_totalfast22_array_1 = []
N_totalobservablefast22_array_1 = []
N_totalrecoverablefast22_array_1 = []
N_totalfast22_array_10 = []
N_totalobservablefast22_array_10 = []
N_totalrecoverablefast22_array_10 = []
N_totalfast22_array_30 = []
N_totalobservablefast22_array_30 = []
N_totalrecoverablefast22_array_30 = []
N_totalfast22_array_100 = []
N_totalobservablefast22_array_100 = []
N_totalrecoverablefast22_array_100 = []
N_totalfast22_array_1000 = []
N_totalobservablefast22_array_1000 = []
N_totalrecoverablefast22_array_1000 = []
N_totalfast195_array = []
N_totalobservablefast195_array = []
N_totalrecoverablefast195_array = []
N_totalfast195_array_03 = []
N_totalobservablefast195_array_03 = []
N_totalrecoverablefast195_array_03 = []
N_totalfast195_array_1 = []
N_totalobservablefast195_array_1 = []
N_totalrecoverablefast195_array_1 = []
N_totalfast195_array_10 = []
N_totalobservablefast195_array_10 = []
N_totalrecoverablefast195_array_10 = []
N_totalfast195_array_30 = []
N_totalobservablefast195_array_30 = []
N_totalrecoverablefast195_array_30 = []
N_totalfast195_array_100 = []
N_totalobservablefast195_array_100 = []
N_totalrecoverablefast195_array_100 = []
N_totalfast195_array_1000 = []
N_totalobservablefast195_array_1000 = []
N_totalrecoverablefast195_array_1000 = []
N_totalobsDist_array = []
N_totalobservableobsDist_array = []
N_totalrecoverableobsDist_array = []
N_totalobsDist_array_03 = []
N_totalobservableobsDist_array_03 = []
N_totalrecoverableobsDist_array_03 = []
N_totalobsDist_array_1 = []
N_totalobservableobsDist_array_1 = []
N_totalrecoverableobsDist_array_1 = []
N_totalobsDist_array_10 = []
N_totalobservableobsDist_array_10 = []
N_totalrecoverableobsDist_array_10 = []
N_totalobsDist_array_30 = []
N_totalobservableobsDist_array_30 = []
N_totalrecoverableobsDist_array_30 = []
N_totalobsDist_array_100 = []
N_totalobservableobsDist_array_100 = []
N_totalrecoverableobsDist_array_100 = []
N_totalobsDist_array_1000 = []
N_totalobservableobsDist_array_1000 = []
N_totalrecoverableobsDist_array_1000 = []
N_totalobsDist22_array = []
N_totalobservableobsDist22_array = []
N_totalrecoverableobsDist22_array = []
N_totalobsDist22_array_03 = []
N_totalobservableobsDist22_array_03 = []
N_totalrecoverableobsDist22_array_03 = []
N_totalobsDist22_array_1 = []
N_totalobservableobsDist22_array_1 = []
N_totalrecoverableobsDist22_array_1 = []
N_totalobsDist22_array_10 = []
N_totalobservableobsDist22_array_10 = []
N_totalrecoverableobsDist22_array_10 = []
N_totalobsDist22_array_30 = []
N_totalobservableobsDist22_array_30 = []
N_totalrecoverableobsDist22_array_30 = []
N_totalobsDist22_array_100 = []
N_totalobservableobsDist22_array_100 = []
N_totalrecoverableobsDist22_array_100 = []
N_totalobsDist22_array_1000 = []
N_totalobservableobsDist22_array_1000 = []
N_totalrecoverableobsDist22_array_1000 = []
N_totalobsDist195_array = []
N_totalobservableobsDist195_array = []
N_totalrecoverableobsDist195_array = []
N_totalobsDist195_array_03 = []
N_totalobservableobsDist195_array_03 = []
N_totalrecoverableobsDist195_array_03 = []
N_totalobsDist195_array_1 = []
N_totalobservableobsDist195_array_1 = []
N_totalrecoverableobsDist195_array_1 = []
N_totalobsDist195_array_10 = []
N_totalobservableobsDist195_array_10 = []
N_totalrecoverableobsDist195_array_10 = []
N_totalobsDist195_array_30 = []
N_totalobservableobsDist195_array_30 = []
N_totalrecoverableobsDist195_array_30 = []
N_totalobsDist195_array_100 = []
N_totalobservableobsDist195_array_100 = []
N_totalrecoverableobsDist195_array_100 = []
N_totalobsDist195_array_1000 = []
N_totalobservableobsDist195_array_1000 = []
N_totalrecoverableobsDist195_array_1000 = []
def fitRagfb():
    x = [0.05, 0.1, 1, 8, 15]  # estimated bin midpoints, based on https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
y = [0.20, 0.35, 0.50, 0.70, 0.75]
init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
fitter = fitting.LevMarLSQFitter()
fit = fitter(init, x, y)
return fit
fbFit= fitRagfb()
mbins = np.arange(0,10, 0.1, dtype='float')
cutP = 0.10 #condition on recoverability/tolerance
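# Quick sanity check of the fitted binary fraction (illustrative): fbFit is a
# PowerLaw1D model, so evaluating it near the anchor masses should roughly
# reproduce the y values used in fitRagfb, e.g.
#
#     print(fbFit(np.array([0.1, 1., 8.])))  # expect values near 0.35, 0.50, 0.70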
for filenormal_ in sorted(allFiles_normal):
filename = filenormal_[60:]
    fileid = filename.replace('output_file.csv', '')  # str.strip('output_file.csv') would strip characters, not the suffix
print ("I'm starting " + fileid)
datnormal = pd.read_csv(filenormal_, sep = ',', header=2)
PeriodIn = datnormal['p'] # input period -- 'p' in data file
##########################################################
datnormal1 = pd.read_csv(filenormal_, sep = ',', header=0, nrows=1)
N_tri = datnormal1["NstarsTRILEGAL"][0]
#print("N_tri = ", N_tri)
Nall = len(PeriodIn)
m1hAll0, m1b = np.histogram(datnormal["m1"], bins=mbins)
dm1 = np.diff(m1b)
m1val = m1b[:-1] + dm1/2.
fb = np.sum(m1hAll0/Nall*fbFit(m1val))
N_mult = N_tri*fb
##########################################################
    if len(PeriodIn) == 0:
continue
if N_tri == 0:
continue
else:
PeriodOut = datnormal['LSM_PERIOD'] #LSM_PERIOD in data file
        appMagMean = datnormal['appMagMean']  # apparent magnitude; used for cuts at 24 (default), 22, and 19.5 (SNR = 10 -- roughly Kepler's bright limit, brighter than LSST can manage)
observable = datnormal.loc[PeriodOut != -999].index
observable_03 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999)].index
observable_1 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999)].index
observable_10 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999)].index
observable_30 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999)].index
observable_100 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999)].index
observable_1000 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999)].index
observable_22 = datnormal.loc[(PeriodOut != -999) & (appMagMean <= 22.)].index
observable_03_22 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_1_22 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_10_22 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_30_22 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_100_22 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_1000_22 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_195 = datnormal.loc[(PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_03_195 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_1_195 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_10_195 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_30_195 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_100_195 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_1000_195 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
fullP = abs(PeriodOut - PeriodIn)/PeriodIn
halfP = abs(PeriodOut - 0.5*PeriodIn)/(0.5*PeriodIn)
twiceP = abs(PeriodOut - 2*PeriodIn)/(2*PeriodIn)
recoverable = datnormal.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_03 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_1 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_10 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_30 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_100 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_1000 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_22 = datnormal.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_03_22 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_1_22 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_10_22 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_30_22 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_100_22 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_1000_22 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_195 = datnormal.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_03_195 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_1_195 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_10_195 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_30_195 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_100_195 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_1000_195 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
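        # fullP/halfP/twiceP above accept a recovery at the true period or at the
        # common half- and double-period aliases of period-finding algorithms,
        # each within the fractional tolerance cutP.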
P03 = datnormal.loc[PeriodIn <= 0.3].index
P1 = datnormal.loc[PeriodIn <= 1].index
P10 = datnormal.loc[PeriodIn <= 10].index
P30 = datnormal.loc[PeriodIn <= 30].index
P100 = datnormal.loc[PeriodIn <= 100].index
P1000 = datnormal.loc[PeriodIn <= 1000].index
P_22 = datnormal.loc[appMagMean <= 22.].index
P03_22 = datnormal.loc[(PeriodIn <= 0.3) & (appMagMean <= 22.)].index
P1_22 = datnormal.loc[(PeriodIn <= 1) & (appMagMean <= 22.)].index
P10_22 = datnormal.loc[(PeriodIn <= 10) & (appMagMean <= 22.)].index
P30_22 = datnormal.loc[(PeriodIn <= 30) & (appMagMean <= 22.)].index
P100_22 = datnormal.loc[(PeriodIn <= 100) & (appMagMean <= 22.)].index
P1000_22 = datnormal.loc[(PeriodIn <= 1000) & (appMagMean <= 22.)].index
P_195 = datnormal.loc[appMagMean <= 19.5].index
P03_195 = datnormal.loc[(PeriodIn <= 0.3) & (appMagMean <= 19.5)].index
P1_195 = datnormal.loc[(PeriodIn <= 1) & (appMagMean <= 19.5)].index
P10_195 = datnormal.loc[(PeriodIn <= 10) & (appMagMean <= 19.5)].index
P30_195 = datnormal.loc[(PeriodIn <= 30) & (appMagMean <= 19.5)].index
P100_195 = datnormal.loc[(PeriodIn <= 100) & (appMagMean <= 19.5)].index
P1000_195 = datnormal.loc[(PeriodIn <= 1000) & (appMagMean <= 19.5)].index
N_all = (len(PeriodIn)/len(PeriodIn))*N_mult
N_all03 = (len(P03)/len(PeriodIn))*N_mult
N_all1 = (len(P1)/len(PeriodIn))*N_mult
N_all10 = (len(P10)/len(PeriodIn))*N_mult
N_all30 = (len(P30)/len(PeriodIn))*N_mult
N_all100 = (len(P100)/len(PeriodIn))*N_mult
N_all1000 = (len(P1000)/len(PeriodIn))*N_mult
N_all_22 = (len(P_22)/len(PeriodIn))*N_mult
N_all03_22 = (len(P03_22)/len(PeriodIn))*N_mult
N_all1_22 = (len(P1_22)/len(PeriodIn))*N_mult
N_all10_22 = (len(P10_22)/len(PeriodIn))*N_mult
N_all30_22 = (len(P30_22)/len(PeriodIn))*N_mult
N_all100_22 = (len(P100_22)/len(PeriodIn))*N_mult
N_all1000_22 = (len(P1000_22)/len(PeriodIn))*N_mult
N_all_195 = (len(P_195)/len(PeriodIn))*N_mult
N_all03_195 = (len(P03_195)/len(PeriodIn))*N_mult
N_all1_195 = (len(P1_195)/len(PeriodIn))*N_mult
N_all10_195 = (len(P10_195)/len(PeriodIn))*N_mult
N_all30_195 = (len(P30_195)/len(PeriodIn))*N_mult
N_all100_195 = (len(P100_195)/len(PeriodIn))*N_mult
N_all1000_195 = (len(P1000_195)/len(PeriodIn))*N_mult
N_obs = (len(observable)/len(PeriodIn))*N_mult
N_obs03 = (len(observable_03)/len(PeriodIn))*N_mult
N_obs1 = (len(observable_1)/len(PeriodIn))*N_mult
N_obs10 = (len(observable_10)/len(PeriodIn))*N_mult
N_obs30 = (len(observable_30)/len(PeriodIn))*N_mult
N_obs100 = (len(observable_100)/len(PeriodIn))*N_mult
N_obs1000 = (len(observable_1000)/len(PeriodIn))*N_mult
N_obs_22 = (len(observable_22)/len(PeriodIn))*N_mult
N_obs03_22 = (len(observable_03_22)/len(PeriodIn))*N_mult
N_obs1_22 = (len(observable_1_22)/len(PeriodIn))*N_mult
N_obs10_22 = (len(observable_10_22)/len(PeriodIn))*N_mult
N_obs30_22 = (len(observable_30_22)/len(PeriodIn))*N_mult
N_obs100_22 = (len(observable_100_22)/len(PeriodIn))*N_mult
N_obs1000_22 = (len(observable_1000_22)/len(PeriodIn))*N_mult
N_obs_195 = (len(observable_195)/len(PeriodIn))*N_mult
N_obs03_195 = (len(observable_03_195)/len(PeriodIn))*N_mult
N_obs1_195 = (len(observable_1_195)/len(PeriodIn))*N_mult
N_obs10_195 = (len(observable_10_195)/len(PeriodIn))*N_mult
N_obs30_195 = (len(observable_30_195)/len(PeriodIn))*N_mult
N_obs100_195 = (len(observable_100_195)/len(PeriodIn))*N_mult
N_obs1000_195 = (len(observable_1000_195)/len(PeriodIn))*N_mult
N_rec = (len(recoverable)/len(PeriodIn))*N_mult
N_rec03 = (len(recoverable_03)/len(PeriodIn))*N_mult
N_rec1 = (len(recoverable_1)/len(PeriodIn))*N_mult
N_rec10 = (len(recoverable_10)/len(PeriodIn))*N_mult
N_rec30 = (len(recoverable_30)/len(PeriodIn))*N_mult
N_rec100 = (len(recoverable_100)/len(PeriodIn))*N_mult
N_rec1000 = (len(recoverable_1000)/len(PeriodIn))*N_mult
N_rec_22 = (len(recoverable_22)/len(PeriodIn))*N_mult
N_rec03_22 = (len(recoverable_03_22)/len(PeriodIn))*N_mult
N_rec1_22 = (len(recoverable_1_22)/len(PeriodIn))*N_mult
N_rec10_22 = (len(recoverable_10_22)/len(PeriodIn))*N_mult
N_rec30_22 = (len(recoverable_30_22)/len(PeriodIn))*N_mult
N_rec100_22 = (len(recoverable_100_22)/len(PeriodIn))*N_mult
N_rec1000_22 = (len(recoverable_1000_22)/len(PeriodIn))*N_mult
N_rec_195 = (len(recoverable_195)/len(PeriodIn))*N_mult
N_rec03_195 = (len(recoverable_03_195)/len(PeriodIn))*N_mult
N_rec1_195 = (len(recoverable_1_195)/len(PeriodIn))*N_mult
N_rec10_195 = (len(recoverable_10_195)/len(PeriodIn))*N_mult
N_rec30_195 = (len(recoverable_30_195)/len(PeriodIn))*N_mult
N_rec100_195 = (len(recoverable_100_195)/len(PeriodIn))*N_mult
N_rec1000_195 = (len(recoverable_1000_195)/len(PeriodIn))*N_mult
N_totalnormal_array.append(float(N_all))
N_totalobservablenormal_array.append(float(N_obs))
N_totalrecoverablenormal_array.append(float(N_rec))
N_totalnormal_array_03.append(float(N_all03))
N_totalobservablenormal_array_03.append(float(N_obs03))
N_totalrecoverablenormal_array_03.append(float(N_rec03))
N_totalnormal_array_1.append(float(N_all1))
N_totalobservablenormal_array_1.append(float(N_obs1))
N_totalrecoverablenormal_array_1.append(float(N_rec1))
N_totalnormal_array_10.append(float(N_all10))
N_totalobservablenormal_array_10.append(float(N_obs10))
N_totalrecoverablenormal_array_10.append(float(N_rec10))
N_totalnormal_array_30.append(float(N_all30))
N_totalobservablenormal_array_30.append(float(N_obs30))
N_totalrecoverablenormal_array_30.append(float(N_rec30))
N_totalnormal_array_100.append(float(N_all100))
N_totalobservablenormal_array_100.append(float(N_obs100))
N_totalrecoverablenormal_array_100.append(float(N_rec100))
N_totalnormal_array_1000.append(float(N_all1000))
N_totalobservablenormal_array_1000.append(float(N_obs1000))
N_totalrecoverablenormal_array_1000.append(float(N_rec1000))
N_totalnormal22_array.append(float(N_all_22))
N_totalobservablenormal22_array.append(float(N_obs_22))
N_totalrecoverablenormal22_array.append(float(N_rec_22))
N_totalnormal22_array_03.append(float(N_all03_22))
N_totalobservablenormal22_array_03.append(float(N_obs03_22))
N_totalrecoverablenormal22_array_03.append(float(N_rec03_22))
N_totalnormal22_array_1.append(float(N_all1_22))
N_totalobservablenormal22_array_1.append(float(N_obs1_22))
N_totalrecoverablenormal22_array_1.append(float(N_rec1_22))
N_totalnormal22_array_10.append(float(N_all10_22))
N_totalobservablenormal22_array_10.append(float(N_obs10_22))
N_totalrecoverablenormal22_array_10.append(float(N_rec10_22))
N_totalnormal22_array_30.append(float(N_all30_22))
N_totalobservablenormal22_array_30.append(float(N_obs30_22))
N_totalrecoverablenormal22_array_30.append(float(N_rec30_22))
N_totalnormal22_array_100.append(float(N_all100_22))
N_totalobservablenormal22_array_100.append(float(N_obs100_22))
N_totalrecoverablenormal22_array_100.append(float(N_rec100_22))
N_totalnormal22_array_1000.append(float(N_all1000_22))
N_totalobservablenormal22_array_1000.append(float(N_obs1000_22))
N_totalrecoverablenormal22_array_1000.append(float(N_rec1000_22))
N_totalnormal195_array.append(float(N_all_195))
N_totalobservablenormal195_array.append(float(N_obs_195))
N_totalrecoverablenormal195_array.append(float(N_rec_195))
N_totalnormal195_array_03.append(float(N_all03_195))
N_totalobservablenormal195_array_03.append(float(N_obs03_195))
N_totalrecoverablenormal195_array_03.append(float(N_rec03_195))
N_totalnormal195_array_1.append(float(N_all1_195))
N_totalobservablenormal195_array_1.append(float(N_obs1_195))
N_totalrecoverablenormal195_array_1.append(float(N_rec1_195))
N_totalnormal195_array_10.append(float(N_all10_195))
N_totalobservablenormal195_array_10.append(float(N_obs10_195))
N_totalrecoverablenormal195_array_10.append(float(N_rec10_195))
N_totalnormal195_array_30.append(float(N_all30_195))
N_totalobservablenormal195_array_30.append(float(N_obs30_195))
N_totalrecoverablenormal195_array_30.append(float(N_rec30_195))
N_totalnormal195_array_100.append(float(N_all100_195))
N_totalobservablenormal195_array_100.append(float(N_obs100_195))
N_totalrecoverablenormal195_array_100.append(float(N_rec100_195))
N_totalnormal195_array_1000.append(float(N_all1000_195))
N_totalobservablenormal195_array_1000.append(float(N_obs1000_195))
N_totalrecoverablenormal195_array_1000.append(float(N_rec1000_195))
N_totalnormal = np.sum(N_totalnormal_array)
N_totalnormal_03 = np.sum(N_totalnormal_array_03)
N_totalnormal_1 = np.sum(N_totalnormal_array_1)
N_totalnormal_10 = np.sum(N_totalnormal_array_10)
N_totalnormal_30 = np.sum(N_totalnormal_array_30)
N_totalnormal_100 = np.sum(N_totalnormal_array_100)
N_totalnormal_1000 = np.sum(N_totalnormal_array_1000)
N_totalobservablenormal = np.sum(N_totalobservablenormal_array)
N_totalobservablenormal_03 = np.sum(N_totalobservablenormal_array_03)
N_totalobservablenormal_1 = np.sum(N_totalobservablenormal_array_1)
N_totalobservablenormal_10 = np.sum(N_totalobservablenormal_array_10)
N_totalobservablenormal_30 = np.sum(N_totalobservablenormal_array_30)
N_totalobservablenormal_100 = np.sum(N_totalobservablenormal_array_100)
N_totalobservablenormal_1000 = np.sum(N_totalobservablenormal_array_1000)
N_totalrecoverablenormal = np.sum(N_totalrecoverablenormal_array)
N_totalrecoverablenormal_03 = np.sum(N_totalrecoverablenormal_array_03)
N_totalrecoverablenormal_1 = np.sum(N_totalrecoverablenormal_array_1)
N_totalrecoverablenormal_10 = np.sum(N_totalrecoverablenormal_array_10)
N_totalrecoverablenormal_30 = np.sum(N_totalrecoverablenormal_array_30)
N_totalrecoverablenormal_100 = np.sum(N_totalrecoverablenormal_array_100)
N_totalrecoverablenormal_1000 = np.sum(N_totalrecoverablenormal_array_1000)
N_totalnormal22 = np.sum(N_totalnormal22_array)
N_totalnormal22_03 = np.sum(N_totalnormal22_array_03)
N_totalnormal22_1 = np.sum(N_totalnormal22_array_1)
N_totalnormal22_10 = np.sum(N_totalnormal22_array_10)
N_totalnormal22_30 = np.sum(N_totalnormal22_array_30)
N_totalnormal22_100 = np.sum(N_totalnormal22_array_100)
N_totalnormal22_1000 = np.sum(N_totalnormal22_array_1000)
N_totalobservablenormal22 = np.sum(N_totalobservablenormal22_array)
N_totalobservablenormal22_03 = np.sum(N_totalobservablenormal22_array_03)
N_totalobservablenormal22_1 = np.sum(N_totalobservablenormal22_array_1)
N_totalobservablenormal22_10 = np.sum(N_totalobservablenormal22_array_10)
N_totalobservablenormal22_30 = np.sum(N_totalobservablenormal22_array_30)
N_totalobservablenormal22_100 = np.sum(N_totalobservablenormal22_array_100)
N_totalobservablenormal22_1000 = np.sum(N_totalobservablenormal22_array_1000)
N_totalrecoverablenormal22 = np.sum(N_totalrecoverablenormal22_array)
N_totalrecoverablenormal22_03 = np.sum(N_totalrecoverablenormal22_array_03)
N_totalrecoverablenormal22_1 = np.sum(N_totalrecoverablenormal22_array_1)
N_totalrecoverablenormal22_10 = np.sum(N_totalrecoverablenormal22_array_10)
N_totalrecoverablenormal22_30 = np.sum(N_totalrecoverablenormal22_array_30)
import numpy as np
from frites.conn.conn_tf import _tf_decomp
from frites.conn.conn_spec import conn_spec
class TestConnSpec:
np.random.seed(0)
n_roi, n_times, n_epochs = 4, 1000, 20
n_edges = int(n_roi * (n_roi - 1) / 2)
sfreq, freqs = 200, np.arange(1, 51, 1)
n_freqs = len(freqs)
n_cycles = freqs / 2
times = np.arange(0, n_times // sfreq, 1 / sfreq)
eta = np.random.normal(0, 1, size=(n_epochs, n_roi, n_times))
def test_tf_decomp(self, ):
# Test output shape
for mode in ["morlet", "multitaper"]:
out = _tf_decomp(self.eta, self.sfreq, self.freqs, mode=mode,
n_cycles=self.n_cycles, n_jobs=1)
self.__assert_shape(out.shape, conn=False)
# For multitaper test both single and array mt_bandwidth
out1 = _tf_decomp(self.eta, self.sfreq, self.freqs, mode="multitaper",
n_cycles=self.n_cycles, mt_bandwidth=4, n_jobs=1)
out2 = _tf_decomp(self.eta, self.sfreq, self.freqs, mode="multitaper",
n_cycles=self.n_cycles,
mt_bandwidth=[4] * self.n_freqs, n_jobs=1)
np.testing.assert_array_equal(out1, out2)
##################################################################
# Compare the auto-spectra with groundtruth
##################################################################
for mode in ["morlet", "multitaper"]:
# 1. Compare for stationary sinal
x = self.__get_signal(stationary=True)
out = _tf_decomp(x, self.sfreq, self.freqs, mode=mode,
n_cycles=self.n_cycles, n_jobs=1)
out = (out * np.conj(out)).real
if mode == "morlet":
val, atol = 20, 2
else:
val, atol = 5.8, 0.35
idx_f = self.__get_freqs_indexes(28, 32)
actual = out.mean(axis=(0, -1))[:, idx_f].mean(1)
np.testing.assert_allclose(
actual, val * np.ones_like(actual), atol=atol)
# 2. Compare for non-stationary signal
x = self.__get_signal(stationary=False)
out = _tf_decomp(x, self.sfreq, self.freqs, mode=mode,
n_cycles=self.n_cycles, n_jobs=1)
out = (out * np.conj(out)).real
if mode == "morlet":
val, atol = 11, 1
else:
val, atol = 3.2, 0.3
actual1 = out.mean(
axis=(0, -1))[:, self.__get_freqs_indexes(8, 12)].mean(1)
actual2 = out.mean(
axis=(0, -1))[:, self.__get_freqs_indexes(28, 32)].mean(1)
            np.testing.assert_allclose(actual1, val * np.ones_like(actual1),
                                       atol=atol)
            np.testing.assert_allclose(actual2, val * np.ones_like(actual2),
                                       atol=atol)
def test_conn_spec(self,):
"""Test function conn_spec"""
# General parameters for the conn_spec function
kw = dict(sfreq=self.sfreq, freqs=self.freqs, n_jobs=1, verbose=False,
n_cycles=self.n_cycles, times=self.times, sm_kernel='square')
for method in ['coh', 'plv']:
##################################################################
# Check general attributes of the conn_spec container
##################################################################
# Compute coherence for white noise
out = conn_spec(self.eta, sm_times=2., metric=method, **kw)
# Test container attributes, dims and coords
assert out.name == method
self.__assert_shape(out.shape)
self.__assert_default_rois(out.roi.data)
self.__assert_dims(out.dims)
self.__assert_attrs(out.attrs)
##################################################################
# Compare output with groundtruth
##################################################################
# 1. Compare with spectral conn for stationary sinal
x = self.__get_signal(stationary=True)
out = conn_spec(x, sm_times=2., metric=method, **kw)
actual = out.mean(dim=("trials", "times")).sel(
freqs=slice(28, 32)).mean("freqs")
np.testing.assert_allclose(
actual, 0.80 * np.ones_like(actual), atol=0.1)
# 2. Compare with no stationary signal
x = self.__get_signal(stationary=False)
out = conn_spec(x, sm_times=0.6, metric=method, **kw)
actual_1 = out.mean("trials").sel(freqs=slice(8, 12),
times=slice(0.5, 2.2))
actual_2 = out.mean("trials").sel(freqs=slice(28, 33),
times=slice(2.8, 4.7))
actual_1 = actual_1.mean(dim="freqs")
actual_2 = actual_2.mean(dim="freqs")
if method == "coh":
val = 0.8
else:
val = 0.9
            np.testing.assert_allclose(actual_1, val * np.ones_like(actual_1),
                                       atol=0.1)  # atol assumed to match the stationary case above
            np.testing.assert_allclose(actual_2, val * np.ones_like(actual_2),
                                       atol=0.1)
"""
from photo_wct.py of https://github.com/NVIDIA/FastPhotoStyle
Copyright (C) 2018 NVIDIA Corporation.
Licensed under the CC BY-NC-SA 4.0
"""
import os
import datetime
import numpy as np
from PIL import Image
from torchvision import transforms
from torchvision.utils import save_image
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
class Timer:
def __init__(self, msg='Elapsed time: {}', verbose=True):
self.msg = msg
self.start_time = None
self.verbose = verbose
def __enter__(self):
self.start_time = datetime.datetime.now()
def __exit__(self, exc_type, exc_value, exc_tb):
if self.verbose:
print(self.msg.format(datetime.datetime.now() - self.start_time))
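# Usage sketch:
#
#     with Timer("Stylization took {}"):
#         ...  # any block; the elapsed time is printed on exit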
def open_image(image_path, image_size=None):
image = Image.open(image_path)
_transforms = []
if image_size is not None:
image = transforms.Resize(image_size)(image)
# _transforms.append(transforms.Resize(image_size))
w, h = image.size
_transforms.append(transforms.CenterCrop((h // 16 * 16, w // 16 * 16)))
_transforms.append(transforms.ToTensor())
#_transforms.append(transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD))
transform = transforms.Compose(_transforms)
return transform(image).unsqueeze(0)
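# Usage sketch (the path is a placeholder): returns a 1 x 3 x H x W float
# tensor for an RGB input, with H and W cropped to multiples of 16,
# presumably so the network's downsampling strides divide evenly.
#
#     content = open_image("images/content.jpg", image_size=512)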
def change_seg(seg):
color_dict = {
(0, 0, 255): 3, # blue
(0, 255, 0): 2, # green
(0, 0, 0): 0, # black
(255, 255, 255): 1, # white
(255, 0, 0): 4, # red
(255, 255, 0): 5, # yellow
(128, 128, 128): 6, # grey
(0, 255, 255): 7, # lightblue
(255, 0, 255): 8 # purple
}
arr_seg = np.asarray(seg)
    new_seg = np.zeros(arr_seg.shape[:-1])
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 8 13:54:55 2020
@author: akurnizk
"""
import csv
import math
import time
import sys,os
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import pylab
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
map_dir = r'E:\Maps' # retrieved files from https://viewer.nationalmap.gov/basic/
data_dir = os.path.join('E:\Data')
#%% Interpolate nans in arrays
def nan_helper(y):
"""Helper to handle indices and logical indices of NaNs.
Input:
- y, 1d numpy array with possible NaNs
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
Example:
>>> # linear interpolation of NaNs
>>> nans, x= nan_helper(y)
>>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
"""
return np.isnan(y), lambda z: z.nonzero()[0]
#%% Loading Information from HR Dike Sensors (Make sure times are in EDT)
with open(os.path.join(data_dir,"General Dike Data","USGS 011058798 Herring R at Chequessett Neck Rd.txt")) as f:
reader = csv.reader(f, delimiter="\t")
HR_dike_all_info = list(reader)
HR_dike_lev_disch_cond = HR_dike_all_info[32:]
HR_dike_all_df = pd.DataFrame(HR_dike_lev_disch_cond[2:], columns=HR_dike_lev_disch_cond[0])
HR_dike_all_df.drop(HR_dike_all_df.columns[[0,1,3,5,7,9,11,13]],axis=1,inplace=True)
HR_dike_all_df.columns = ["datetime","Gage height, ft, Ocean side","Discharge, cfs","Gage height, ft, HR side",
"Spec Con, microsiemens/cm, HR side","Spec Con, microsiemens/cm, Ocean side"]
# Make strings numeric
HR_dike_all_df = HR_dike_all_df.replace("Eqp", '', regex=True)
HR_dike_all_df["datetime"] = pd.to_datetime(HR_dike_all_df["datetime"])
HR_dike_all_df["Gage height, ft, Ocean side"] = pd.to_numeric(HR_dike_all_df["Gage height, ft, Ocean side"])
HR_dike_all_df["Discharge, cfs"] = pd.to_numeric(HR_dike_all_df["Discharge, cfs"])
HR_dike_all_df["Gage height, ft, HR side"] = pd.to_numeric(HR_dike_all_df["Gage height, ft, HR side"])
HR_dike_all_df["Spec Con, microsiemens/cm, HR side"] = pd.to_numeric(HR_dike_all_df["Spec Con, microsiemens/cm, HR side"])
HR_dike_all_df["Spec Con, microsiemens/cm, Ocean side"] = pd.to_numeric(HR_dike_all_df["Spec Con, microsiemens/cm, Ocean side"])
# Merging Duplicate Entries
HR_dike_all_df.set_index('datetime',inplace=True)
HR_dike_all_df = HR_dike_all_df.mean(level=0)
HR_dike_all_df.reset_index(inplace=True)
# Remove conductivity columns, convert to metric system
HR_dike_lev_disch_ft = HR_dike_all_df[["datetime","Gage height, ft, Ocean side","Gage height, ft, HR side","Discharge, cfs"]]
HR_dike_lev_disch_m = HR_dike_lev_disch_ft.copy()
HR_dike_lev_disch_m.columns = ["datetime","Gage height, m, Ocean side","Gage height, m, HR side","Discharge, cms"]
HR_dike_lev_disch_m["Gage height, m, Ocean side"] = HR_dike_lev_disch_ft["Gage height, ft, Ocean side"]*0.3048
HR_dike_lev_disch_m["Gage height, m, HR side"] = HR_dike_lev_disch_ft["Gage height, ft, HR side"]*0.3048
HR_dike_lev_disch_m["Discharge, cms"] = HR_dike_lev_disch_ft["Discharge, cfs"]*0.02832
#%% Load HR Geometry and CTD data
out_x_stacked = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_xcoords.csv'), delimiter=',')
out_y_stacked = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_ycoords.csv'), delimiter=',')
elevs_interp = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_elevs.csv'), delimiter=',')
intersect_newxy = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_inscts.csv'), delimiter=',')
min_dist_dx = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_dx.csv'), delimiter=',')
# make top of array the upstream-most section?
out_x_stacked = np.flip(out_x_stacked,axis=0)
out_y_stacked = np.flip(out_y_stacked,axis=0)
elevs_interp = np.flip(elevs_interp,axis=0)
# Simulation
import numpy as np
import numba
from numba import jit
# standard HMC
def hmc_mh_resample_uni(u_func, du_func, epsilon, nt, m, M, theta_init):
"""
This is a function to realize Hamiltonian Monte Carlo with Metropolis-Hastings
correction in unidimensional cases with resampling procedure.
"""
theta = [theta_init]
r = []
for t in range(nt):
r.append(np.random.normal(0, np.sqrt(M)))
theta0, r0 = theta[-1], r[-1]
r0 = r0 - epsilon/2*du_func(theta0)
for i in range(m):
theta0 = theta0 + epsilon*1/M*r0
r0 = r0 - epsilon*du_func(theta0)
r0 = r0 - epsilon/2*du_func(theta0)
# Metropolis-Hastings correction
u = np.random.uniform()
H1 = u_func(theta0) + 1/2*r0**2*1/M
H2 = u_func(theta[-1]) + 1/2*r[-1]**2*1/M
p = np.exp(H2 - H1)
        if u < min(1, p):
            theta.append(theta0)
        else:
            # Standard MH: the chain repeats the current state on rejection;
            # without this branch, theta and r drift out of alignment.
            theta.append(theta[-1])
    return [theta[:-1], r]
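# Usage sketch for a standard normal target, U(theta) = theta**2 / 2
# (all tuning values are illustrative):
#
#     samples, _ = hmc_mh_resample_uni(lambda th: 0.5 * th**2, lambda th: th,
#                                      epsilon=0.1, nt=5000, m=20, M=1.0,
#                                      theta_init=0.0)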
def hmc_nomh_resample_uni(du_func, epsilon, nt, m, M, theta_init):
"""
This is a function to realize Hamiltonian Monte Carlo without Metropolis-Hastings
correction in unidimensional cases with resampling procedure.
"""
theta = [theta_init]
r = []
for t in range(nt):
r.append(np.random.normal(0, np.sqrt(M)))
theta0, r0 = theta[-1], r[-1]
r0 = r0 - epsilon/2*du_func(theta0)
for i in range(m):
theta0 = theta0 + epsilon*1/M*r0
r0 = r0 - epsilon*du_func(theta0)
r0 = r0 - epsilon/2*du_func(theta0)
# No Metropolis-Hastings correction
theta.append(theta0)
return [theta[:-1], r]
def hmc_mh_resample_multi(u_func, du_func, epsilon, nt, m, M, theta_init):
"""
This is a function to realize Hamiltonian Monte Carlo with Metropolis-Hastings
correction in multidimensional cases with resampling procedure.
"""
theta = [theta_init]
r = []
for t in range(nt):
r.append(np.random.multivariate_normal(np.zeros(M.shape[0]), M))
theta0, r0 = theta[-1], r[-1]
r0 = r0 - epsilon/2*du_func(theta0)
for i in range(m):
theta0 = theta0 + epsilon*np.linalg.inv(M)@r0
r0 = r0 - epsilon*du_func(theta0)
r0 = r0 - epsilon/2*du_func(theta0)
# Metropolis-Hastings correction
        u = np.random.uniform()
        # The remainder mirrors the unidimensional version above, with the
        # kinetic term generalized to r' M^{-1} r.
        H1 = u_func(theta0) + 1/2 * r0 @ np.linalg.inv(M) @ r0
        H2 = u_func(theta[-1]) + 1/2 * r[-1] @ np.linalg.inv(M) @ r[-1]
        p = np.exp(H2 - H1)
        if u < min(1, p):
            theta.append(theta0)
        else:
            theta.append(theta[-1])
    return [theta[:-1], r]
# Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
# Compare 3 turbine results to SOWFA in 8 m/s, higher TI case
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import floris.tools as wfct
# HIGH TI
# Low TI
# Write out SOWFA results
layout_x = (1000.0, 1756.0, 2512.0, 3268.0, 4024.0)
layout_y = (1000.0, 1000.0, 1000.0, 1000.0, 1000.0)
sowfa_results = np.array(
[
[1946.3, 654.7, 764.8, 825, 819.8, 0, 0, 0, 0, 0],
[1701.8, 947.9, 1091.7, 1037.9, 992.8, 20, 15, 10, 5, 0],
[1587.2, 1202.3, 971.6, 857.3, 860.9, 25, 0, 0, 0, 0],
[1588.4, 1007.8, 1207, 1190.9, 1173.2, 25, 20, 15, 10, 0],
[1588.6, 928.6, 1428.6, 1031.1, 939.4, 25, 25, 0, 0, 0],
]
)
df_sowfa = pd.DataFrame(
sowfa_results, columns=["p0", "p1", "p2", "p3", "p4", "y0", "y1", "y2", "y3", "y4"]
)
# SET UP FLORIS AND MATCH TO BASE CASE
wind_speed = 8.39
TI = 0.065
# Initialize the FLORIS interface fi, use default model
fi = wfct.floris_interface.FlorisInterface("../../example_input.json")
fi.reinitialize_flow_field(
wind_speed=[wind_speed],
turbulence_intensity=[TI],
layout_array=(layout_x, layout_y),
)
# Setup alternative with gch off
fi_b = copy.deepcopy(fi)
fi_b.set_gch(False)
# Compare yaw combinations
yaw_combinations = [(0, 0, 0, 0, 0), (25, 0, 0, 0, 0), (25, 25, 0, 0, 0)]
yaw_names = ["%d/%d/%d/%d/%d" % yc for yc in yaw_combinations]
# Plot individual turbine powers
fig, axarr = plt.subplots(1, 3, sharex=True, sharey=True, figsize=(12, 5))
total_sowfa = []
total_gch_on = []
total_gch_off = []
for y_idx, yc in enumerate(yaw_combinations):
# Collect SOWFA DATA
    s_data = df_sowfa[
        (df_sowfa.y0 == yc[0])
        & (df_sowfa.y1 == yc[1])
        & (df_sowfa.y2 == yc[2])
        & (df_sowfa.y3 == yc[3])
        & (df_sowfa.y4 == yc[4])
    ]
s_data = [
s_data.p0.values[0],
s_data.p1.values[0],
s_data.p2.values[0],
s_data.p3.values[0],
s_data.p4.values[0],
]
total_sowfa.append(np.sum(s_data))
# Collect GCH ON data
fi.calculate_wake(yaw_angles=yc)
g_data = np.array(fi.get_turbine_power()) / 1000.0
total_gch_on.append(np.sum(g_data))
# Collect GCH OFF data
fi_b.calculate_wake(yaw_angles=yc)
b_data = np.array(fi_b.get_turbine_power()) / 1000.0
total_gch_off.append(np.sum(b_data))
ax = axarr[y_idx]
    ax.set_title(yaw_names[y_idx])
ax.plot(["T0", "T1", "T2", "T3", "T4"], s_data, "k", marker="s", label="SOWFA")
ax.plot(["T0", "T1", "T2", "T3", "T4"], g_data, "g", marker="o", label="GCH ON")
ax.plot(["T0", "T1", "T2", "T3", "T4"], b_data, "b", marker="*", label="GCH OFF")
axarr[-1].legend()
# Calculate totals and normalized totals
total_sowfa = np.array(total_sowfa)
nom_sowfa = total_sowfa / total_sowfa[0]
total_gch_on = np.array(total_gch_on)
nom_gch_on = total_gch_on / total_gch_on[0]
total_gch_off = np.array(total_gch_off)
nom_gch_off = total_gch_off / total_gch_off[0]
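# Minimal continuation sketch (assumed; the original script is truncated
# here): tabulate the normalized totals and render the figure.
#
#     print(pd.DataFrame({"SOWFA": nom_sowfa, "GCH ON": nom_gch_on,
#                         "GCH OFF": nom_gch_off}, index=yaw_names))
#     plt.show()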
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for intersecting B |eacute| zier curves via geometric methods.
The functions here are pure Python and many have equivalent implementations
written in Fortran and exposed via a Cython wrapper.
.. |eacute| unicode:: U+000E9 .. LATIN SMALL LETTER E WITH ACUTE
:trim:
"""
import itertools
import numpy as np
from bezier.hazmat import curve_helpers
from bezier.hazmat import helpers as _py_helpers
from bezier.hazmat import intersection_helpers
# Set the threshold for exponent at half the bits available, this way one round
# of Newton's method can (usually) finish the job by squaring the error.
_ERROR_VAL = 0.5 ** 26
_MAX_INTERSECT_SUBDIVISIONS = 20
_MAX_CANDIDATES = 64
_UNHANDLED_LINES = (
"If both curves are lines, the intersection should have "
"been computed already."
)
_TOO_MANY_TEMPLATE = (
"The number of candidate intersections is too high.\n"
"{:d} candidate pairs."
)
_NO_CONVERGE_TEMPLATE = (
"Curve intersection failed to converge to approximately linear "
"subdivisions after {:d} iterations."
)
_MIN_INTERVAL_WIDTH = 0.5 ** 40
def bbox_intersect(nodes1, nodes2):
r"""Bounding box intersection predicate.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Determines if the bounding box of two sets of control points
intersects in :math:`\mathbf{R}^2` with non-trivial
intersection (i.e. tangent bounding boxes are insufficient).
.. note::
Though we assume (and the code relies on this fact) that
the nodes are two-dimensional, we don't check it.
Args:
nodes1 (numpy.ndarray): Set of control points for a
B |eacute| zier shape.
nodes2 (numpy.ndarray): Set of control points for a
B |eacute| zier shape.
Returns:
int: Enum from :class:`.BoxIntersectionType` indicating the type of
bounding box intersection.
"""
left1, right1, bottom1, top1 = _py_helpers.bbox(nodes1)
left2, right2, bottom2, top2 = _py_helpers.bbox(nodes2)
if right2 < left1 or right1 < left2 or top2 < bottom1 or top1 < bottom2:
return BoxIntersectionType.DISJOINT
if (
right2 == left1
or right1 == left2
or top2 == bottom1
or top1 == bottom2
):
return BoxIntersectionType.TANGENT
else:
return BoxIntersectionType.INTERSECTION
def linearization_error(nodes):
r"""Compute the maximum error of a linear approximation.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
.. note::
This is a helper for :class:`.Linearization`, which is used during the
curve-curve intersection process.
We use the line
.. math::
L(s) = v_0 (1 - s) + v_n s
and compute a bound on the maximum error
.. math::
\max_{s \in \left[0, 1\right]} \|B(s) - L(s)\|_2.
Rather than computing the actual maximum (a tight bound), we
use an upper bound via the remainder from Lagrange interpolation
in each component. This leaves us with :math:`\frac{s(s - 1)}{2!}`
times the second derivative in each component.
The second derivative curve is degree :math:`d = n - 2` and
is given by
.. math::
B''(s) = n(n - 1) \sum_{j = 0}^{d} \binom{d}{j} s^j
(1 - s)^{d - j} \cdot \Delta^2 v_j
Due to this form (and the convex combination property of
B |eacute| zier Curves) we know each component of the second derivative
will be bounded by the maximum of that component among the
:math:`\Delta^2 v_j`.
For example, the curve
.. math::
B(s) = \left[\begin{array}{c} 0 \\ 0 \end{array}\right] (1 - s)^2
+ \left[\begin{array}{c} 3 \\ 1 \end{array}\right] 2s(1 - s)
+ \left[\begin{array}{c} 9 \\ -2 \end{array}\right] s^2
has
:math:`B''(s) \equiv \left[\begin{array}{c} 6 \\ -8 \end{array}\right]`
which has norm :math:`10` everywhere, hence the maximum error is
.. math::
\left.\frac{s(1 - s)}{2!} \cdot 10\right|_{s = \frac{1}{2}}
= \frac{5}{4}.
.. image:: ../../images/linearization_error.png
:align: center
.. testsetup:: linearization-error, linearization-error-fail
import numpy as np
import bezier
from bezier.hazmat.geometric_intersection import linearization_error
.. doctest:: linearization-error
>>> nodes = np.asfortranarray([
... [0.0, 3.0, 9.0],
... [0.0, 1.0, -2.0],
... ])
>>> linearization_error(nodes)
1.25
.. testcleanup:: linearization-error
import make_images
make_images.linearization_error(nodes)
As a **non-example**, consider a "pathological" set of control points:
.. math::
B(s) = \left[\begin{array}{c} 0 \\ 0 \end{array}\right] (1 - s)^3
+ \left[\begin{array}{c} 5 \\ 12 \end{array}\right] 3s(1 - s)^2
+ \left[\begin{array}{c} 10 \\ 24 \end{array}\right] 3s^2(1 - s)
+ \left[\begin{array}{c} 30 \\ 72 \end{array}\right] s^3
By construction, this lies on the line :math:`y = \frac{12x}{5}`, but
the parametrization is cubic:
:math:`12 \cdot x(s) = 5 \cdot y(s) = 180s(s^2 + 1)`. Hence, the fact
that the curve is a line is not accounted for and we take the worse
case among the nodes in:
.. math::
B''(s) = 3 \cdot 2 \cdot \left(
\left[\begin{array}{c} 0 \\ 0 \end{array}\right] (1 - s)
+ \left[\begin{array}{c} 15 \\ 36 \end{array}\right] s\right)
which gives a nonzero maximum error:
.. doctest:: linearization-error-fail
>>> nodes = np.asfortranarray([
... [0.0, 5.0, 10.0, 30.0],
... [0.0, 12.0, 24.0, 72.0],
... ])
>>> linearization_error(nodes)
29.25
Though it may seem that ``0`` is a more appropriate answer, consider
the **goal** of this function. We seek to linearize curves and then
intersect the linear approximations. Then the :math:`s`-values from
the line-line intersection is lifted back to the curves. Thus
the error :math:`\|B(s) - L(s)\|_2` is more relevant than the
underyling algebraic curve containing :math:`B(s)`.
.. note::
It may be more appropriate to use a **relative** linearization error
rather than the **absolute** error provided here. It's unclear if
the domain :math:`\left[0, 1\right]` means the error is **already**
adequately scaled or if the error should be scaled by the arc
length of the curve or the (easier-to-compute) length of the line.
Args:
nodes (numpy.ndarray): Nodes of a curve.
Returns:
float: The maximum error between the curve and the
linear approximation.
"""
_, num_nodes = nodes.shape
degree = num_nodes - 1
if degree == 1:
return 0.0
second_deriv = nodes[:, :-2] - 2.0 * nodes[:, 1:-1] + nodes[:, 2:]
worst_case = np.max(np.abs(second_deriv), axis=1)
# max_{0 <= s <= 1} s(1 - s)/2 = 1/8 = 0.125
multiplier = 0.125 * degree * (degree - 1)
# NOTE: worst_case is 1D due to np.max(), so this is the vector norm.
return multiplier * np.linalg.norm(worst_case, ord=2)
def segment_intersection(start0, end0, start1, end1):
r"""Determine the intersection of two line segments.
Assumes each line is parametric
.. math::
\begin{alignat*}{2}
L_0(s) &= S_0 (1 - s) + E_0 s &&= S_0 + s \Delta_0 \\
L_1(t) &= S_1 (1 - t) + E_1 t &&= S_1 + t \Delta_1.
\end{alignat*}
To solve :math:`S_0 + s \Delta_0 = S_1 + t \Delta_1`, we use the
cross product:
.. math::
\left(S_0 + s \Delta_0\right) \times \Delta_1 =
\left(S_1 + t \Delta_1\right) \times \Delta_1 \Longrightarrow
s \left(\Delta_0 \times \Delta_1\right) =
\left(S_1 - S_0\right) \times \Delta_1.
Similarly
.. math::
\Delta_0 \times \left(S_0 + s \Delta_0\right) =
\Delta_0 \times \left(S_1 + t \Delta_1\right) \Longrightarrow
\left(S_1 - S_0\right) \times \Delta_0 =
\Delta_0 \times \left(S_0 - S_1\right) =
t \left(\Delta_0 \times \Delta_1\right).
.. note::
Since our points are in :math:`\mathbf{R}^2`, the "traditional"
cross product in :math:`\mathbf{R}^3` will always point in the
:math:`z` direction, so in the above we mean the :math:`z`
component of the cross product, rather than the entire vector.
For example, the diagonal lines
.. math::
\begin{align*}
L_0(s) &= \left[\begin{array}{c} 0 \\ 0 \end{array}\right] (1 - s) +
\left[\begin{array}{c} 2 \\ 2 \end{array}\right] s \\
L_1(t) &= \left[\begin{array}{c} -1 \\ 2 \end{array}\right] (1 - t) +
\left[\begin{array}{c} 1 \\ 0 \end{array}\right] t
\end{align*}
intersect at :math:`L_0\left(\frac{1}{4}\right) =
L_1\left(\frac{3}{4}\right) =
\frac{1}{2} \left[\begin{array}{c} 1 \\ 1 \end{array}\right]`.
.. image:: ../../images/segment_intersection1.png
:align: center
.. testsetup:: segment-intersection1, segment-intersection2
import numpy as np
from bezier.hazmat.geometric_intersection import segment_intersection
.. doctest:: segment-intersection1
:options: +NORMALIZE_WHITESPACE
>>> start0 = np.asfortranarray([0.0, 0.0])
>>> end0 = np.asfortranarray([2.0, 2.0])
>>> start1 = np.asfortranarray([-1.0, 2.0])
>>> end1 = np.asfortranarray([1.0, 0.0])
>>> s, t, _ = segment_intersection(start0, end0, start1, end1)
>>> s
0.25
>>> t
0.75
.. testcleanup:: segment-intersection1
import make_images
make_images.segment_intersection1(start0, end0, start1, end1, s)
Taking the parallel (but different) lines
.. math::
\begin{align*}
L_0(s) &= \left[\begin{array}{c} 1 \\ 0 \end{array}\right] (1 - s) +
\left[\begin{array}{c} 0 \\ 1 \end{array}\right] s \\
L_1(t) &= \left[\begin{array}{c} -1 \\ 3 \end{array}\right] (1 - t) +
\left[\begin{array}{c} 3 \\ -1 \end{array}\right] t
\end{align*}
we should be able to determine that the lines don't intersect, but
this function is not meant for that check:
.. image:: ../../images/segment_intersection2.png
:align: center
.. doctest:: segment-intersection2
:options: +NORMALIZE_WHITESPACE
>>> start0 = np.asfortranarray([1.0, 0.0])
>>> end0 = np.asfortranarray([0.0, 1.0])
>>> start1 = np.asfortranarray([-1.0, 3.0])
>>> end1 = np.asfortranarray([3.0, -1.0])
>>> _, _, success = segment_intersection(start0, end0, start1, end1)
>>> success
False
.. testcleanup:: segment-intersection2
import make_images
make_images.segment_intersection2(start0, end0, start1, end1)
Instead, we use :func:`parallel_lines_parameters`:
.. testsetup:: segment-intersection2-continued
import numpy as np
from bezier.hazmat.geometric_intersection import (
parallel_lines_parameters
)
start0 = np.asfortranarray([1.0, 0.0])
end0 = np.asfortranarray([0.0, 1.0])
start1 = np.asfortranarray([-1.0, 3.0])
end1 = np.asfortranarray([3.0, -1.0])
.. doctest:: segment-intersection2-continued
>>> disjoint, _ = parallel_lines_parameters(start0, end0, start1, end1)
>>> disjoint
True
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
start0 (numpy.ndarray): A 1D NumPy ``2``-array that is the start
vector :math:`S_0` of the parametric line :math:`L_0(s)`.
end0 (numpy.ndarray): A 1D NumPy ``2``-array that is the end
vector :math:`E_0` of the parametric line :math:`L_0(s)`.
start1 (numpy.ndarray): A 1D NumPy ``2``-array that is the start
vector :math:`S_1` of the parametric line :math:`L_1(s)`.
end1 (numpy.ndarray): A 1D NumPy ``2``-array that is the end
vector :math:`E_1` of the parametric line :math:`L_1(s)`.
Returns:
Tuple[float, float, bool]: Pair of :math:`s_{\ast}` and
:math:`t_{\ast}` such that the lines intersect:
:math:`L_0\left(s_{\ast}\right) = L_1\left(t_{\ast}\right)` and then
a boolean indicating if an intersection was found (i.e. if the lines
aren't parallel).
"""
delta0 = end0 - start0
delta1 = end1 - start1
cross_d0_d1 = _py_helpers.cross_product(delta0, delta1)
if cross_d0_d1 == 0.0:
return None, None, False
else:
start_delta = start1 - start0
s = _py_helpers.cross_product(start_delta, delta1) / cross_d0_d1
t = _py_helpers.cross_product(start_delta, delta0) / cross_d0_d1
return s, t, True
def parallel_lines_parameters(start0, end0, start1, end1):
r"""Checks if two parallel lines ever meet.
Meant as a back-up when :func:`segment_intersection` fails.
.. note::
This function assumes but never verifies that the lines
are parallel.
In the case that the segments are parallel and lie on **different**
lines, then there is a **guarantee** of no intersection. However, if
they are on the exact same line, they may define a shared segment
coincident to both lines.
In :func:`segment_intersection`, we utilized the normal form of the
lines (via the cross product):
.. math::
\begin{align*}
L_0(s) \times \Delta_0 &\equiv S_0 \times \Delta_0 \\
L_1(t) \times \Delta_1 &\equiv S_1 \times \Delta_1
\end{align*}
So, we can detect if :math:`S_1` is on the first line by
checking if
.. math::
S_0 \times \Delta_0 \stackrel{?}{=} S_1 \times \Delta_0.
If it is not on the first line, then we are done, the
segments don't meet:
.. image:: ../../images/parallel_lines_parameters1.png
:align: center
.. testsetup:: parallel-different1, parallel-different2
import numpy as np
from bezier.hazmat.geometric_intersection import (
parallel_lines_parameters
)
.. doctest:: parallel-different1
>>> # Line: y = 1
>>> start0 = np.asfortranarray([0.0, 1.0])
>>> end0 = np.asfortranarray([1.0, 1.0])
>>> # Vertical shift up: y = 2
>>> start1 = np.asfortranarray([-1.0, 2.0])
>>> end1 = np.asfortranarray([3.0, 2.0])
>>> disjoint, _ = parallel_lines_parameters(start0, end0, start1, end1)
>>> disjoint
True
.. testcleanup:: parallel-different1
import make_images
make_images.helper_parallel_lines(
start0, end0, start1, end1, "parallel_lines_parameters1.png")
If :math:`S_1` **is** on the first line, we want to check that
:math:`S_1` and :math:`E_1` define parameters outside of
:math:`\left[0, 1\right]`. To compute these parameters:
.. math::
L_1(t) = S_0 + s_{\ast} \Delta_0 \Longrightarrow
s_{\ast} = \frac{\Delta_0^T \left(
L_1(t) - S_0\right)}{\Delta_0^T \Delta_0}.
For example, the intervals :math:`\left[0, 1\right]` and
:math:`\left[\frac{3}{2}, 2\right]` (via
:math:`S_1 = S_0 + \frac{3}{2} \Delta_0` and
:math:`E_1 = S_0 + 2 \Delta_0`) correspond to segments that
don't meet:
.. image:: ../../images/parallel_lines_parameters2.png
:align: center
.. doctest:: parallel-different2
>>> start0 = np.asfortranarray([1.0, 0.0])
>>> delta0 = np.asfortranarray([2.0, -1.0])
>>> end0 = start0 + 1.0 * delta0
>>> start1 = start0 + 1.5 * delta0
>>> end1 = start0 + 2.0 * delta0
>>> disjoint, _ = parallel_lines_parameters(start0, end0, start1, end1)
>>> disjoint
True
.. testcleanup:: parallel-different2
import make_images
make_images.helper_parallel_lines(
start0, end0, start1, end1, "parallel_lines_parameters2.png")
but if the intervals overlap, like :math:`\left[0, 1\right]` and
:math:`\left[-1, \frac{1}{2}\right]`, the segments meet:
.. image:: ../../images/parallel_lines_parameters3.png
:align: center
.. testsetup:: parallel-different3, parallel-different4
import numpy as np
from bezier.hazmat.geometric_intersection import (
parallel_lines_parameters
)
start0 = np.asfortranarray([1.0, 0.0])
delta0 = np.asfortranarray([2.0, -1.0])
end0 = start0 + 1.0 * delta0
.. doctest:: parallel-different3
>>> start1 = start0 - 1.5 * delta0
>>> end1 = start0 + 0.5 * delta0
>>> disjoint, parameters = parallel_lines_parameters(
... start0, end0, start1, end1)
>>> disjoint
False
>>> parameters
array([[0. , 0.5 ],
[0.75, 1. ]])
.. testcleanup:: parallel-different3
import make_images
make_images.helper_parallel_lines(
start0, end0, start1, end1, "parallel_lines_parameters3.png")
Similarly, if the second interval completely contains the first,
the segments meet:
.. image:: ../../images/parallel_lines_parameters4.png
:align: center
.. doctest:: parallel-different4
>>> start1 = start0 + 4.5 * delta0
>>> end1 = start0 - 3.5 * delta0
>>> disjoint, parameters = parallel_lines_parameters(
... start0, end0, start1, end1)
>>> disjoint
False
>>> parameters
array([[1. , 0. ],
[0.4375, 0.5625]])
.. testcleanup:: parallel-different4
import make_images
make_images.helper_parallel_lines(
start0, end0, start1, end1, "parallel_lines_parameters4.png")
.. note::
This function doesn't currently allow wiggle room around the
desired value, i.e. the two values must be bitwise identical.
However, the most "correct" version of this function likely
should allow for some round off.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
start0 (numpy.ndarray): A 1D NumPy ``2``-array that is the start
vector :math:`S_0` of the parametric line :math:`L_0(s)`.
end0 (numpy.ndarray): A 1D NumPy ``2``-array that is the end
vector :math:`E_0` of the parametric line :math:`L_0(s)`.
start1 (numpy.ndarray): A 1D NumPy ``2``-array that is the start
vector :math:`S_1` of the parametric line :math:`L_1(s)`.
end1 (numpy.ndarray): A 1D NumPy ``2``-array that is the end
vector :math:`E_1` of the parametric line :math:`L_1(s)`.
Returns:
Tuple[bool, Optional[numpy.ndarray]]: A pair of
* Flag indicating if the lines are disjoint.
* An optional ``2 x 2`` matrix of ``s-t`` parameters only present if
the lines aren't disjoint. The first column will contain the
parameters at the beginning of the shared segment and the second
column will correspond to the end of the shared segment.
"""
# NOTE: There is no corresponding "enable", but the disable only applies
# in this lexical scope.
# pylint: disable=too-many-branches
delta0 = end0 - start0
line0_const = _py_helpers.cross_product(start0, delta0)
start1_against = _py_helpers.cross_product(start1, delta0)
if line0_const != start1_against:
return True, None
# Each array is a 1D vector, so we can use the vector dot product.
norm0_sq = np.vdot(delta0, delta0)
# S1 = L1(0) = S0 + sA D0
# <==> sA D0 = S1 - S0
# ==> sA (D0^T D0) = D0^T (S1 - S0)
s_val0 = np.vdot(start1 - start0, delta0) / norm0_sq
# E1 = L1(1) = S0 + sB D0
# <==> sB D0 = E1 - S0
# ==> sB (D0^T D0) = D0^T (E1 - S0)
s_val1 = np.vdot(end1 - start0, delta0) / norm0_sq
# s = s_val0 + t (s_val1 - s_val0)
# t = 0 <==> s = s_val0
# t = 1 <==> s = s_val1
# t = -s_val0 / (s_val1 - s_val0) <==> s = 0
# t = (1 - s_val0) / (s_val1 - s_val0) <==> s = 1
if s_val0 <= s_val1:
# In this branch the segments are moving in the same direction, i.e.
# (t=0<-->s=s_val0) are both less than (t=1<-->s_val1).
if 1.0 < s_val0:
return True, None
elif s_val0 < 0.0:
start_s = 0.0
start_t = -s_val0 / (s_val1 - s_val0)
else:
start_s = s_val0
start_t = 0.0
if s_val1 < 0.0:
return True, None
elif 1.0 < s_val1:
end_s = 1.0
end_t = (1.0 - s_val0) / (s_val1 - s_val0)
else:
end_s = s_val1
end_t = 1.0
else:
# In this branch the segments are moving in opposite directions, i.e.
# in (t=0<-->s=s_val0) and (t=1<-->s_val1) we have 0 < 1
# but ``s_val0 > s_val1``.
if s_val0 < 0.0:
return True, None
elif 1.0 < s_val0:
start_s = 1.0
start_t = (s_val0 - 1.0) / (s_val0 - s_val1)
else:
start_s = s_val0
start_t = 0.0
if 1.0 < s_val1:
return True, None
elif s_val1 < 0.0:
end_s = 0.0
end_t = s_val0 / (s_val0 - s_val1)
else:
end_s = s_val1
end_t = 1.0
parameters = np.asfortranarray([[start_s, end_s], [start_t, end_t]])
return False, parameters
def line_line_collide(line1, line2):
"""Determine if two line segments meet.
This is a helper for :func:`convex_hull_collide` in the
special case that the two convex hulls are actually
just line segments. (Even in this case, this is only
problematic if both segments are on a single line.)
Args:
line1 (numpy.ndarray): ``2 x 2`` array of start and end nodes.
line2 (numpy.ndarray): ``2 x 2`` array of start and end nodes.
Returns:
bool: Indicating if the line segments collide.
"""
s, t, success = segment_intersection(
line1[:, 0], line1[:, 1], line2[:, 0], line2[:, 1]
)
if success:
return _py_helpers.in_interval(
s, 0.0, 1.0
) and _py_helpers.in_interval(t, 0.0, 1.0)
else:
disjoint, _ = parallel_lines_parameters(
line1[:, 0], line1[:, 1], line2[:, 0], line2[:, 1]
)
return not disjoint
def convex_hull_collide(nodes1, nodes2):
"""Determine if the convex hulls of two curves collide.
.. note::
This is a helper for :func:`from_linearized`.
Args:
nodes1 (numpy.ndarray): Control points of a first curve.
nodes2 (numpy.ndarray): Control points of a second curve.
Returns:
bool: Indicating if the convex hulls collide.
"""
polygon1 = _py_helpers.simple_convex_hull(nodes1)
_, polygon_size1 = polygon1.shape
polygon2 = _py_helpers.simple_convex_hull(nodes2)
_, polygon_size2 = polygon2.shape
if polygon_size1 == 2 and polygon_size2 == 2:
return line_line_collide(polygon1, polygon2)
else:
return _py_helpers.polygon_collide(polygon1, polygon2)
def from_linearized(first, second, intersections):
"""Determine curve-curve intersection from pair of linearizations.
.. note::
This assumes that at least one of ``first`` and ``second`` is
not a line. The line-line case should be handled "early"
by :func:`check_lines`.
.. note::
This assumes the caller has verified that the bounding boxes
for ``first`` and ``second`` actually intersect.
If there is an intersection along the segments, adds that intersection
to ``intersections``. Otherwise, returns without doing anything.
Args:
first (Linearization): First curve being intersected.
second (Linearization): Second curve being intersected.
intersections (list): A list of existing intersections.
Raises:
ValueError: If ``first`` and ``second`` both have linearization error
of ``0.0`` (i.e. they are both lines). This is because this
function expects the caller to have used :func:`check_lines`
already.
"""
# NOTE: There is no corresponding "enable", but the disable only applies
# in this lexical scope.
# pylint: disable=too-many-return-statements
s, t, success = segment_intersection(
first.start_node, first.end_node, second.start_node, second.end_node
)
bad_parameters = False
if success:
if not (
_py_helpers.in_interval(s, 0.0, 1.0)
and _py_helpers.in_interval(t, 0.0, 1.0)
):
bad_parameters = True
else:
if first.error == 0.0 and second.error == 0.0:
raise ValueError(_UNHANDLED_LINES)
# Just fall back to a Newton iteration starting in the middle of
# the given intervals.
bad_parameters = True
s = 0.5
t = 0.5
if bad_parameters:
# In the unlikely case that we have parallel segments or segments
# that intersect outside of [0, 1] x [0, 1], we can still exit
# if the convex hulls don't intersect.
if not convex_hull_collide(first.curve.nodes, second.curve.nodes):
return
# Now, promote ``s`` and ``t`` onto the original curves.
orig_s = (1 - s) * first.curve.start + s * first.curve.end
orig_t = (1 - t) * second.curve.start + t * second.curve.end
refined_s, refined_t = intersection_helpers.full_newton(
orig_s, first.curve.original_nodes, orig_t, second.curve.original_nodes
)
refined_s, success = _py_helpers.wiggle_interval(refined_s)
if not success:
return
refined_t, success = _py_helpers.wiggle_interval(refined_t)
if not success:
return
add_intersection(refined_s, refined_t, intersections)
def add_intersection(s, t, intersections):
r"""Adds an intersection to list of ``intersections``.
.. note::
This is a helper for :func:`from_linearized` and :func:`endpoint_check`.
These functions are used (directly or indirectly) by
:func:`all_intersections` exclusively, and that function has a
Fortran equivalent.
Accounts for repeated intersection points. If the intersection has already
been found, does nothing.
If ``s`` is below :math:`2^{-10}`, it will be replaced with ``1 - s``
and compared against ``1 - s'`` for all ``s'`` already in
``intersections``. (Similar if ``t`` is below the
:attr:`~bezier.hazmat.intersection_helpers.ZERO_THRESHOLD`.)
This is perfectly "appropriate" since evaluating a B |eacute| zier curve
requires using both ``s`` and ``1 - s``, so both values are equally
relevant.
Compares :math:`\|p - q\|` to :math:`\|p\|` where :math:`p = (s, t)` is
current candidate intersection (or the "normalized" version, such as
:math:`p = (1 - s, t)`) and :math:`q` is one of the already added
intersections. If the difference is below :math:`2^{-36}` (i.e.
:attr:`~bezier.hazmat.intersection_helpers.NEWTON_ERROR_RATIO`)
then the intersection is considered to be duplicate.
Args:
s (float): The first parameter in an intersection.
t (float): The second parameter in an intersection.
intersections (list): List of existing intersections.
"""
if not intersections:
intersections.append((s, t))
return
if s < intersection_helpers.ZERO_THRESHOLD:
candidate_s = 1.0 - s
else:
candidate_s = s
if t < intersection_helpers.ZERO_THRESHOLD:
candidate_t = 1.0 - t
else:
candidate_t = t
norm_candidate = np.linalg.norm([candidate_s, candidate_t], ord=2)
for existing_s, existing_t in intersections:
# NOTE: |(1 - s1) - (1 - s2)| = |s1 - s2| in exact arithmetic, so
# we just compute ``s1 - s2`` rather than using
# ``candidate_s`` / ``candidate_t``. Due to round-off, these
# differences may be slightly different, but only up to machine
# precision.
delta_s = s - existing_s
delta_t = t - existing_t
norm_update = np.linalg.norm([delta_s, delta_t], ord=2)
if (
norm_update
< intersection_helpers.NEWTON_ERROR_RATIO * norm_candidate
):
return
intersections.append((s, t))
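# Illustrative sketch (not part of the library): two candidate parameter
# pairs that differ by far less than ``NEWTON_ERROR_RATIO`` collapse to a
# single recorded intersection.
#
#     intersections = []
#     add_intersection(0.5, 0.25, intersections)
#     add_intersection(0.5 + 1e-12, 0.25, intersections)
#     assert len(intersections) == 1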
def endpoint_check(
first, node_first, s, second, node_second, t, intersections
):
r"""Check if curve endpoints are identical.
.. note::
This is a helper for :func:`tangent_bbox_intersection`. These
functions are used (directly or indirectly) by
:func:`all_intersections` exclusively, and that function has a
Fortran equivalent.
Args:
first (SubdividedCurve): First curve being intersected (assumed in
:math:`\mathbf{R}^2`).
node_first (numpy.ndarray): 1D ``2``-array, one of the endpoints
of ``first``.
s (float): The parameter corresponding to ``node_first``, so
expected to be one of ``0.0`` or ``1.0``.
second (SubdividedCurve): Second curve being intersected (assumed in
:math:`\mathbf{R}^2`).
node_second (numpy.ndarray): 1D ``2``-array, one of the endpoints
of ``second``.
t (float): The parameter corresponding to ``node_second``, so
expected to be one of ``0.0`` or ``1.0``.
intersections (list): A list of already encountered
intersections. If these curves intersect at their tangency,
then those intersections will be added to this list.
"""
if _py_helpers.vector_close(node_first, node_second):
orig_s = (1 - s) * first.start + s * first.end
orig_t = (1 - t) * second.start + t * second.end
add_intersection(orig_s, orig_t, intersections)
def tangent_bbox_intersection(first, second, intersections):
r"""Check if two curves with tangent bounding boxes intersect.
.. note::
This is a helper for :func:`intersect_one_round`. These
functions are used (directly or indirectly) by
:func:`all_intersections` exclusively, and that function has a
Fortran equivalent.
If the bounding boxes are tangent, intersection can
only occur along that tangency.
If the curve is **not** a line, the **only** way the curve can touch
the bounding box is at the endpoints. To see this, consider the
component
.. math::
x(s) = \sum_j W_j x_j.
Since :math:`W_j > 0` for :math:`s \in \left(0, 1\right)`, if there
is some :math:`k` with :math:`x_k < M = \max x_j`, then for any
interior :math:`s`
.. math::
x(s) < \sum_j W_j M = M.
If all :math:`x_j = M`, then :math:`B(s)` falls on the line
:math:`x = M`. (A similar argument holds for the other three
component-extrema types.)
.. note::
This function assumes callers will not pass curves that can be
linearized / are linear. In :func:`all_intersections`, curves
are pre-processed to do any linearization before the
subdivision / intersection process begins.
Args:
first (SubdividedCurve): First curve being intersected (assumed in
:math:`\mathbf{R}^2`).
second (SubdividedCurve): Second curve being intersected (assumed in
:math:`\mathbf{R}^2`).
intersections (list): A list of already encountered
intersections. If these curves intersect at their tangency,
then those intersections will be added to this list.
"""
node_first1 = first.nodes[:, 0]
node_first2 = first.nodes[:, -1]
node_second1 = second.nodes[:, 0]
node_second2 = second.nodes[:, -1]
endpoint_check(
first, node_first1, 0.0, second, node_second1, 0.0, intersections
)
endpoint_check(
first, node_first1, 0.0, second, node_second2, 1.0, intersections
)
endpoint_check(
first, node_first2, 1.0, second, node_second1, 0.0, intersections
)
endpoint_check(
first, node_first2, 1.0, second, node_second2, 1.0, intersections
)
def bbox_line_intersect(nodes, line_start, line_end):
r"""Determine intersection of a bounding box and a line.
We do this by first checking if either the start or end node of the
segment are contained in the bounding box. If they aren't, then
checks if the line segment intersects any of the four sides of the
bounding box.
.. note::
This function is "half-finished". It makes no distinction between
"tangent" intersections of the box and segment and other types
of intersection. However, the distinction is worthwhile, so this
function should be "upgraded" at some point.
Args:
nodes (numpy.ndarray): Points (``2 x N``) that determine a
bounding box.
line_start (numpy.ndarray): Beginning of a line segment (1D
``2``-array).
line_end (numpy.ndarray): End of a line segment (1D ``2``-array).
Returns:
int: Enum from :class:`.BoxIntersectionType` indicating the type of
bounding box intersection.
"""
left, right, bottom, top = _py_helpers.bbox(nodes)
if _py_helpers.in_interval(
line_start[0], left, right
) and _py_helpers.in_interval(line_start[1], bottom, top):
return BoxIntersectionType.INTERSECTION
if _py_helpers.in_interval(
line_end[0], left, right
) and _py_helpers.in_interval(line_end[1], bottom, top):
return BoxIntersectionType.INTERSECTION
# NOTE: We allow ``segment_intersection`` to fail below (i.e.
# ``success=False``). At first, this may appear to "ignore"
# some potential intersections of parallel lines. However,
# no intersections will be missed. If parallel lines don't
# overlap, then there is nothing to miss. If they do overlap,
# then either the segment will have endpoints on the box (already
# covered by the checks above) or the segment will contain an
# entire side of the box, which will force it to intersect the 3
# edges that meet at the two ends of those sides. The parallel
# edge will be skipped, but the other two will be covered.
# Bottom Edge
s_bottom, t_bottom, success = segment_intersection(
np.asfortranarray([left, bottom]),
np.asfortranarray([right, bottom]),
line_start,
line_end,
)
if (
success
and _py_helpers.in_interval(s_bottom, 0.0, 1.0)
and _py_helpers.in_interval(t_bottom, 0.0, 1.0)
):
return BoxIntersectionType.INTERSECTION
# Right Edge
s_right, t_right, success = segment_intersection(
np.asfortranarray([right, bottom]),
np.asfortranarray([right, top]),
line_start,
line_end,
)
if (
success
and _py_helpers.in_interval(s_right, 0.0, 1.0)
and _py_helpers.in_interval(t_right, 0.0, 1.0)
):
return BoxIntersectionType.INTERSECTION
# Top Edge
s_top, t_top, success = segment_intersection(
np.asfortranarray([right, top]),
np.asfortranarray([left, top]),
line_start,
line_end,
)
if (
success
and _py_helpers.in_interval(s_top, 0.0, 1.0)
and _py_helpers.in_interval(t_top, 0.0, 1.0)
):
return BoxIntersectionType.INTERSECTION
# NOTE: We skip the "last" edge. This is because any curve
# that doesn't have an endpoint on a curve must cross
# at least two, so we will have already covered such curves
# in one of the branches above.
return BoxIntersectionType.DISJOINT
def intersect_one_round(candidates, intersections):
"""Perform one step of the intersection process.
.. note::
This is a helper for :func:`all_intersections` and that function
has a Fortran equivalent.
Checks if the bounding boxes of each pair in ``candidates``
intersect. If the bounding boxes do not intersect, the pair
is discarded. Otherwise, the pair is "accepted". Then we
attempt to linearize each curve in an "accepted" pair and
track the overall linearization error for every curve
encountered.
Args:
candidates (Union[list, itertools.chain]): An iterable of
pairs of curves (or linearized curves).
intersections (list): A list of already encountered
intersections. If any intersections can be readily determined
during this round of subdivision, then they will be added
to this list.
Returns:
list: Returns a list of the next round of ``candidates``.
"""
next_candidates = []
# NOTE: In the below we replace ``isinstance(a, B)`` with
# ``a.__class__ is B``, which is a 3-3.5x speedup.
for first, second in candidates:
both_linearized = False
if first.__class__ is Linearization:
if second.__class__ is Linearization:
both_linearized = True
bbox_int = bbox_intersect(
first.curve.nodes, second.curve.nodes
)
else:
bbox_int = bbox_line_intersect(
second.nodes, first.start_node, first.end_node
)
else:
if second.__class__ is Linearization:
bbox_int = bbox_line_intersect(
first.nodes, second.start_node, second.end_node
)
else:
bbox_int = bbox_intersect(first.nodes, second.nodes)
if bbox_int == BoxIntersectionType.DISJOINT:
continue
if bbox_int == BoxIntersectionType.TANGENT and not both_linearized:
# NOTE: Ignore tangent bounding boxes in the linearized case
# because ``tangent_bbox_intersection()`` assumes that both
# curves are not linear.
tangent_bbox_intersection(first, second, intersections)
continue
if both_linearized:
# If both ``first`` and ``second`` are linearizations, then
# we can intersect them immediately.
from_linearized(first, second, intersections)
continue
# If we haven't ``continue``-d, add the accepted pair.
# NOTE: This may be a wasted computation, e.g. if ``first``
# or ``second`` occur in multiple accepted pairs (the caller
# only passes one pair at a time). However, in practice
# the number of such pairs will be small so this cost
# will be low.
lin1 = map(Linearization.from_shape, first.subdivide())
lin2 = map(Linearization.from_shape, second.subdivide())
next_candidates.extend(itertools.product(lin1, lin2))
return next_candidates
def prune_candidates(candidates):
"""Reduce number of candidate intersection pairs.
.. note::
This is a helper for :func:`all_intersections`.
Uses more strict bounding box intersection predicate by forming the
actual convex hull of each candidate curve segment and then checking
if those convex hulls collide.
Args:
candidates (List[Union[SubdividedCurve, Linearization]]): An iterable
of pairs of curves (or linearized curves).
Returns:
List[Union[SubdividedCurve, Linearization]]: A pruned list of curve
pairs.
"""
pruned = []
# NOTE: In the below we replace ``isinstance(a, B)`` with
# ``a.__class__ is B``, which is a 3-3.5x speedup.
for first, second in candidates:
if first.__class__ is Linearization:
nodes1 = first.curve.nodes
else:
nodes1 = first.nodes
if second.__class__ is Linearization:
nodes2 = second.curve.nodes
else:
nodes2 = second.nodes
if convex_hull_collide(nodes1, nodes2):
pruned.append((first, second))
return pruned
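# Illustrative sketch of the driver loop that typically ties these helpers
# together (an assumption-laden outline, not the library's
# ``all_intersections`` implementation, which also has a Fortran equivalent):
#
#     candidates = [(lin1, lin2)]  # pre-linearized input pair
#     for _ in range(_MAX_INTERSECT_SUBDIVISIONS):
#         candidates = intersect_one_round(candidates, intersections)
#         if len(candidates) > _MAX_CANDIDATES:
#             candidates = prune_candidates(candidates)
#         if not candidates:
#             break
#     else:
#         raise ValueError(_NO_CONVERGE_TEMPLATE.format(
#             _MAX_INTERSECT_SUBDIVISIONS))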
def make_same_degree(nodes1, nodes2):
"""Degree-elevate a curve so two curves have matching degree.
Args:
nodes1 (numpy.ndarray): Set of control points for a
B |eacute| zier curve.
nodes2 (numpy.ndarray): Set of control points for a
B |eacute| zier curve.
Returns:
Tuple[numpy.ndarray, numpy.ndarray]: The potentially degree-elevated
nodes passed in.
"""
_, num_nodes1 = nodes1.shape
_, num_nodes2 = nodes2.shape
for _ in range(num_nodes2 - num_nodes1):
nodes1 = curve_helpers.elevate_nodes(nodes1)
for _ in range(num_nodes1 - num_nodes2):
nodes2 = curve_helpers.elevate_nodes(nodes2)
return nodes1, nodes2
def coincident_parameters(nodes1, nodes2):
r"""Check if two B |eacute| zier curves are coincident.
Does so by projecting each segment endpoint onto the other curve
.. math::
B_1(s_0) = B_2(0) \\
B_1(s_m) = B_2(1) \\
B_1(0) = B_2(t_0) \\
B_1(1) = B_2(t_n)
and then finding the "shared interval" where both curves are defined.
If such an interval can't be found (e.g. if one of the endpoints can't be
located on the other curve), returns :data:`None`.
If such a "shared interval" does exist, then this will specialize
each curve onto that shared interval and check if the new control points
agree.
Args:
nodes1 (numpy.ndarray): Set of control points for a
B |eacute| zier curve.
nodes2 (numpy.ndarray): Set of control points for a
B |eacute| zier curve.
Returns:
Optional[Tuple[Tuple[float, float], ...]]: A ``2 x 2`` array of
parameters where the two coincident curves meet. If they are not
coincident, returns :data:`None`.
"""
# NOTE: There is no corresponding "enable", but the disable only applies
# in this lexical scope.
# pylint: disable=too-many-return-statements,too-many-branches
nodes1, nodes2 = make_same_degree(nodes1, nodes2)
s_initial = curve_helpers.locate_point(
nodes1, nodes2[:, 0].reshape((2, 1), order="F")
)
s_final = curve_helpers.locate_point(
nodes1, nodes2[:, -1].reshape((2, 1), order="F")
)
if s_initial is not None and s_final is not None:
# In this case, if the curves were coincident, then ``curve2``
# would be "fully" contained in ``curve1``, so we specialize
# ``curve1`` down to that interval to check.
specialized1 = curve_helpers.specialize_curve(
nodes1, s_initial, s_final
)
if _py_helpers.vector_close(
specialized1.ravel(order="F"), nodes2.ravel(order="F")
):
return ((s_initial, 0.0), (s_final, 1.0))
else:
return None
t_initial = curve_helpers.locate_point(
nodes2, nodes1[:, 0].reshape((2, 1), order="F")
)
t_final = curve_helpers.locate_point(
nodes2, nodes1[:, -1].reshape((2, 1), order="F")
)
if t_initial is None and t_final is None:
# An overlap must have two endpoints and since at most one of the
# endpoints of ``curve2`` lies on ``curve1`` (as indicated by at
# least one of the ``s``-parameters being ``None``), we need (at least)
# one endpoint of ``curve1`` on ``curve2``.
return None
if t_initial is not None and t_final is not None:
# In this case, if the curves were coincident, then ``curve1``
# would be "fully" contained in ``curve2``, so we specialize
# ``curve2`` down to that interval to check.
specialized2 = curve_helpers.specialize_curve(
nodes2, t_initial, t_final
)
if _py_helpers.vector_close(
nodes1.ravel(order="F"), specialized2.ravel(order="F")
):
return ((0.0, t_initial), (1.0, t_final))
else:
return None
if s_initial is None and s_final is None:
# An overlap must have two endpoints and since exactly one of the
# endpoints of ``curve1`` lies on ``curve2`` (as indicated by exactly
# one of the ``t``-parameters being ``None``), we need (at least)
# one endpoint of ``curve1`` on ``curve2``.
return None
# At this point, we know exactly one of the ``s``-parameters and exactly
# one of the ``t``-parameters is not ``None``.
if s_initial is None:
if t_initial is None:
# B1(s_final) = B2(1) AND B1(1) = B2(t_final)
start_s = s_final
end_s = 1.0
start_t = 1.0
end_t = t_final
else:
# B1(0) = B2(t_initial) AND B1(s_final) = B2(1)
start_s = 0.0
end_s = s_final
start_t = t_initial
end_t = 1.0
else:
if t_initial is None:
            # B1(s_initial) = B2(0) AND B1(1) = B2(t_final)
start_s = s_initial
end_s = 1.0
start_t = 0.0
end_t = t_final
else:
# B1(0) = B2(t_initial) AND B1(s_initial) = B2(0)
start_s = 0.0
end_s = s_initial
start_t = t_initial
end_t = 0.0
width_s = abs(start_s - end_s)
width_t = abs(start_t - end_t)
if width_s < _MIN_INTERVAL_WIDTH and width_t < _MIN_INTERVAL_WIDTH:
return None
specialized1 = curve_helpers.specialize_curve(nodes1, start_s, end_s)
specialized2 = curve_helpers.specialize_curve(nodes2, start_t, end_t)
if _py_helpers.vector_close(
specialized1.ravel(order="F"), specialized2.ravel(order="F")
):
return ((start_s, start_t), (end_s, end_t))
else:
return None
def check_lines(first, second):
"""Checks if two curves are lines and tries to intersect them.
.. note::
This is a helper for :func:`.all_intersections`.
If they are not lines / not linearized, immediately returns :data:`False`
with no "return value".
If they are lines, attempts to intersect them (even if they are parallel
and share a coincident segment).
Args:
first (Union[SubdividedCurve, Linearization]): First curve being
intersected.
second (Union[SubdividedCurve, Linearization]): Second curve being
intersected.
Returns:
Tuple[bool, Optional[Tuple[numpy.ndarray, bool]]]: A pair of
* Flag indicating if both candidates in the pair are lines.
* Optional "result" populated only if both candidates are lines.
When this result is populated, it will be a pair of
* array of parameters of intersection
* flag indicating if the two candidates share a coincident segment
"""
# NOTE: In the below we replace ``isinstance(a, B)`` with
# ``a.__class__ is B``, which is a 3-3.5x speedup.
if not (
first.__class__ is Linearization
and second.__class__ is Linearization
and first.error == 0.0
and second.error == 0.0
):
return False, None
s, t, success = segment_intersection(
first.start_node, first.end_node, second.start_node, second.end_node
)
if success:
if _py_helpers.in_interval(s, 0.0, 1.0) and _py_helpers.in_interval(
t, 0.0, 1.0
):
intersections = np.asfortranarray([[s], [t]])
result = intersections, False
else:
result = np.empty((2, 0), order="F"), False
else:
disjoint, params = parallel_lines_parameters(
first.start_node,
first.end_node,
second.start_node,
second.end_node,
)
if disjoint:
            result = np.empty((2, 0), order="F"), False
        else:
            result = params, True
    return True, result
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 11:00:53 2020
@author: m102324
"""
import pysam
import numpy
from scipy import stats
def bam_info(bamfile, layout, frac=0.2, n=500000):
    '''
    Extract DNA fragment size, read length and chrom size information from a
    BAM file. For PE data, fragment size is estimated from read pairs.
    For SE data, fragment size cannot be estimated from the BAM file, so it
    is set to None.
    Parameters
    ----------
    bamfile : str
        Input BAM file.
    layout : str
        Must be "PE" (paired end) or "SE" (single end).
    frac : float
        Fraction to cut off of both tails of the distribution.
    n : int
        Number of paired-end alignments sampled. The default is 500000.
    Returns
    -------
    tuple
        (Trimmed mean of fragment sizes, mean read length, chrom sizes dict).
    '''
samfile = pysam.AlignmentFile(bamfile,'rb')
chrom_sizes = dict(zip(samfile.references, samfile.lengths))
frag_counts = 0
frag_sizes = []
read_length = []
if layout == 'PE':
try:
while (1):
aligned_read = next(samfile)
if aligned_read.is_qcfail:continue
if aligned_read.is_duplicate:continue
if aligned_read.is_secondary:continue
if aligned_read.is_supplementary:continue
if aligned_read.is_unmapped:continue
if aligned_read.mate_is_unmapped:continue
if aligned_read.is_read2:continue
frag_counts +=1
frag_sizes.append(abs(aligned_read.template_length))
read_length.append(aligned_read.query_alignment_length)
#print (aligned_read.query_name + '\t' + str(aligned_read.template_length) )
if frag_counts > n:
break
except StopIteration:
pass
#the order of chroms must be consistent with those in bedGraph file.
#chrom_sizes = sorted(list(zip(samfile.references, samfile.lengths)))
    return (stats.trim_mean(frag_sizes, frac), numpy.mean(read_length),
            chrom_sizes)
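# Example usage (hypothetical file path):
#     frag_size, mean_read_len, chroms = bam_info('sample.bam', layout='PE')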
"""Recon object for combining system blocks (such as datasets and transformers),
model blocks (such as CNNs and ResNets), and optimization blocks (such as conjugate
gradient descent)."""
#!/usr/bin/env python
import numpy as np
import torch
import sys
import pytorch_lightning as pl
from deepinpy.utils import utils
from deepinpy import opt
import deepinpy.utils.complex as cp
from deepinpy.forwards import MultiChannelMRIDataset
from torchvision.utils import make_grid
from torch.optim import lr_scheduler
@torch.jit.script
def calc_nrmse(gt, pred):
return (opt.ip_batch(pred - gt) / opt.ip_batch(gt)).sqrt().mean()
class Recon(pl.LightningModule):
"""An abstract class for implementing system-model-optimization (SMO) constructions.
The Recon is an abstract class which outlines common functionality for all SMO structure implementations. All of them share hyperparameter initialization, MCMRI dataset processing and loading, loss function, training step, and optimizer code. Each implementation of Recon must provide batch, forward, and get_metadata methods in order to define how batches are created from the data, how the model performs its forward pass, and what metadata the user should be able to return. Currently, Recon automatically builds the dataset as an MultiChannelMRIDataset object; overload _build_data to circumvent this.
Args:
hprams (dict): Key-value pairings with parameter names as keys.
Attributes:
hprams (dict): Key-value pairings with hyperparameter names as keys.
_loss_fun (func): Set to use either torch.nn.MSELoss or _abs_loss_fun.
D (MultiChannelMRIDataset): Holds the MCMRI dataset.
"""
def __init__(self, hparams):
super(Recon, self).__init__()
self._init_hparams(hparams)
self._build_data()
self.scheduler = None
def _init_hparams(self, hparams):
self.hparams = hparams
self._loss_fun = torch.nn.MSELoss(reduction='sum')
if hparams.abs_loss:
self.loss_fun = self._abs_loss_fun
else:
self.loss_fun = self._loss_fun
def _build_data(self):
self.D = MultiChannelMRIDataset(data_file=self.hparams.data_file, stdev=self.hparams.stdev, num_data_sets=self.hparams.num_data_sets, adjoint=False, id=0, clear_cache=False, cache_data=False, scale_data=False, fully_sampled=self.hparams.fully_sampled, data_idx=None, inverse_crime=self.hparams.inverse_crime, noncart=self.hparams.noncart)
def _abs_loss_fun(self, x_hat, imgs):
x_hat_abs = torch.sqrt(x_hat.pow(2).sum(dim=-1))
imgs_abs = torch.sqrt(imgs.pow(2).sum(dim=-1))
return self._loss_fun(x_hat_abs, imgs_abs)
def batch(self, data):
"""Not implemented, should define a forward operator A and the adjoint matrix of the input x.
Args:
data (Tensor): The data which the batch will be drawn from.
Raises:
NotImplementedError: Method needs to be implemented.
"""
raise NotImplementedError
def forward(self, y):
"""Not implemented, should perform a prediction using the implemented model.
Args:
y (Tensor): The data which will be passed to the model for processing.
Returns:
The model’s prediction in Tensor form.
Raises:
NotImplementedError: Method needs to be implemented.
"""
def get_metadata(self):
"""Accesses metadata for the Recon.
Returns:
A dict holding the Recon’s metadata.
Raises:
NotImplementedError: Method needs to be implemented.
"""
raise NotImplementedError
# FIXME: batch_nb parameter appears unused.
def training_step(self, batch, batch_nb):
"""Defines a training step solving deep inverse problems, including batching, performing a forward pass through
the model, and logging data. This may either be supervised or unsupervised based on hyperparameters.
Args:
batch (tuple): Should hold the indices of data and the corresponding data, in said order.
batch_nb (None): Currently unimplemented.
Returns:
A dict holding performance data and current epoch for performance tracking over time.
"""
idx, data = batch
idx = utils.itemize(idx)
imgs = data['imgs']
inp = data['out']
self.batch(data)
x_hat = self.forward(inp)
try:
num_cg = self.get_metadata()['num_cg']
except KeyError:
num_cg = 0
if self.logger and (self.current_epoch % self.hparams.save_every_N_epochs == 0 or self.current_epoch == self.hparams.num_epochs - 1):
_b = inp.shape[0]
if _b == 1 and idx == 0:
_idx = 0
elif _b > 1 and 0 in idx:
_idx = idx.index(0)
else:
_idx = None
if _idx is not None:
with torch.no_grad():
if self.x_adj is None:
x_adj = self.A.adjoint(inp)
else:
x_adj = self.x_adj
_x_hat = utils.t2n(x_hat[_idx,...])
_x_gt = utils.t2n(imgs[_idx,...])
_x_adj = utils.t2n(x_adj[_idx,...])
if len(_x_hat.shape) > 2:
_d = tuple(range(len(_x_hat.shape)-2))
_x_hat_rss = np.linalg.norm(_x_hat, axis=_d)
_x_gt_rss = np.linalg.norm(_x_gt, axis=_d)
_x_adj_rss = np.linalg.norm(_x_adj, axis=_d)
myim = torch.tensor(np.stack((_x_adj_rss, _x_hat_rss, _x_gt_rss), axis=0))[:, None, ...]
grid = make_grid(myim, scale_each=True, normalize=True, nrow=8, pad_value=10)
self.logger.experiment.add_image('3_train_prediction_rss', grid, self.current_epoch)
while len(_x_hat.shape) > 2:
_x_hat = _x_hat[0,...]
_x_gt = _x_gt[0,...]
_x_adj = _x_adj[0,...]
myim = torch.tensor(np.stack((np.abs(_x_hat), np.angle(_x_hat)), axis=0))[:, None, ...]
grid = make_grid(myim, scale_each=True, normalize=True, nrow=8, pad_value=10)
self.logger.experiment.add_image('2_train_prediction', grid, self.current_epoch)
if self.current_epoch == 0:
myim = torch.tensor(np.stack((np.abs(_x_gt), np.angle(_x_gt)), axis=0))[:, None, ...]
grid = make_grid(myim, scale_each=True, normalize=True, nrow=8, pad_value=10)
self.logger.experiment.add_image('1_ground_truth', grid, 0)
                        myim = torch.tensor(np.stack((np.abs(_x_adj), np.angle(_x_adj)), axis=0))[:, None, ...]
                        grid = make_grid(myim, scale_each=True, normalize=True, nrow=8, pad_value=10)
                        # Tag name assumed by analogy with the numbered images above.
                        self.logger.experiment.add_image('0_input', grid, 0)
"""
Feature extraction
"""
# Author: <NAME> <<EMAIL>>
#
# License: Apache, Version 2.0
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.metrics import adjusted_mutual_info_score
from scipy.special import psi
from scipy.stats.stats import pearsonr
from scipy.stats import skew, kurtosis
from collections import Counter, defaultdict
from multiprocessing import Pool
import pandas as pd
import operator
from .hsic import FastHsicTestGamma
import math
BINARY = "Binary"
CATEGORICAL = "Categorical"
NUMERICAL = "Numerical"
class FeatureMapper:
def __init__(self, features):
self.features = features
    def fit(self, X, y=None):
        # ``self.features`` holds plain column names (see ``transform``), so
        # there is nothing to fit. The original body looped over the names
        # and called an undefined ``extractor``, which would raise NameError.
        return self
def transform(self, X):
return X[self.features].values
def fit_transform(self, X, y=None):
return self.transform(X)
def weighted_mean_and_std(values, weights):
"""
Returns the weighted average and standard deviation.
values, weights -- numpy ndarrays with the same shape.
"""
average = np.average(values, weights=weights, axis=0)
variance = np.dot(weights, (values - average) ** 2) / weights.sum() # Fast and numerically precise
return (average, np.sqrt(variance))
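# Example: with equal weights this reduces to the ordinary mean and the
# population standard deviation.
#     weighted_mean_and_std(np.array([1.0, 2.0, 3.0]), np.ones(3))
#     # -> (2.0, 0.816...)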
def count_unique(x):
try:
return len(set(x))
except TypeError:
return len(set(x.flat))
def count_unique_ratio(x):
try:
return len(set(x)) / float(len(x))
except TypeError:
return len(set(x.flat))/float(len(x))
def binary(tp):
assert type(tp) is str
return tp == BINARY
def categorical(tp):
assert type(tp) is str
return tp == CATEGORICAL
def numerical(tp):
assert type(tp) is str
return tp == NUMERICAL
def binary_entropy(p, base):
assert p <= 1 and p >= 0
h = -(p * np.log(p) + (1 - p) * np.log(1 - p)) if (p != 0) and (p != 1) else 0
return h / np.log(base)
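# Example: a fair coin carries exactly one bit of entropy.
#     binary_entropy(0.5, 2)  # -> 1.0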
def discrete_probability(x, tx, ffactor, maxdev):
x = discretized_sequence(x, tx, ffactor, maxdev)
try:
return Counter(x)
    except TypeError:
        return Counter(np.array(x).flat) if isinstance(x, list) else Counter(x.flat)
def discretized_values(x, tx, ffactor, maxdev):
if numerical(tx) and count_unique(x) > (2 * ffactor * maxdev + 1):
vmax = ffactor * maxdev
vmin = -ffactor * maxdev
return range(vmin, vmax + 1)
else:
try:
return sorted(list(set(x)))
except TypeError:
return sorted(list(set(x.flat)))
def len_discretized_values(x, tx, ffactor, maxdev):
return len(discretized_values(x, tx, ffactor, maxdev))
def discretized_sequence(x, tx, ffactor, maxdev, norm=True):
if not norm or (numerical(tx) and count_unique(x) > len_discretized_values(x, tx, ffactor, maxdev)):
if norm:
x = (x - np.mean(x)) / np.std(x)
xf = x[abs(x) < maxdev]
x = (x - np.mean(xf)) / np.std(xf)
x = np.round(x * ffactor)
vmax = ffactor * maxdev
vmin = -ffactor * maxdev
x[x > vmax] = vmax
x[x < vmin] = vmin
return x
def discretized_sequences(x, tx, y, ty, ffactor=3, maxdev=3):
return discretized_sequence(x, tx, ffactor, maxdev), discretized_sequence(y, ty, ffactor, maxdev)
def normalized_error_probability(x, tx, y, ty, ffactor=3, maxdev=3):
x, y = discretized_sequences(x, tx, y, ty, ffactor, maxdev)
try:
cx = Counter(x)
cy = Counter(y)
except TypeError:
cx = Counter(x.flat)
cy = Counter(y.flat)
nx = len(cx)
ny = len(cy)
pxy = defaultdict(lambda: 0)
try:
for p in zip(x, y):
pxy[p] += 1
except TypeError:
for p in zip(x.flat, y.flat):
pxy[p] += 1
pxy = np.array([[pxy[(a, b)] for b in cy] for a in cx], dtype=float)
pxy = pxy / pxy.sum()
perr = 1 - np.sum(pxy.max(axis=1))
max_perr = 1 - np.max(pxy.sum(axis=0))
pnorm = perr / max_perr if max_perr > 0 else perr
return pnorm
def discrete_entropy(x, tx, ffactor=3, maxdev=3, bias_factor=0.7):
c = discrete_probability(x, tx, ffactor, maxdev)
# print(c, len(c))
pk = np.array(list(c.values()), dtype=float)
pk = pk / pk.sum()
vec = pk * np.log(pk)
S = -np.sum(vec, axis=0)
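    # The added term is a Miller-Madow-style small-sample bias correction,
    # (K - 1)/(2N), scaled by ``bias_factor``.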
return S + bias_factor * (len(pk) - 1) / float(2 * len(list(x)))
def discrete_divergence(cx, cy):
for a, v in cx.most_common():
if cy[a] == 0:
cy[a] = 1
nx = float(sum(cx.values()))
ny = float(sum(cy.values()))
    total = 0.
    for a, v in cx.most_common():
        px = v / nx
        py = cy[a] / ny
        total += px * np.log(px / py)
    return total
def discrete_joint_entropy(x, tx, y, ty, ffactor=3, maxdev=3):
x, y = discretized_sequences(x, tx, y, ty, ffactor, maxdev)
return discrete_entropy(list(zip(x, y)), CATEGORICAL)
def normalized_discrete_joint_entropy(x, tx, y, ty, ffactor=3, maxdev=3):
x, y = discretized_sequences(x, tx, y, ty, ffactor, maxdev)
e = discrete_entropy(list(zip(x, y)), CATEGORICAL)
nx = len_discretized_values(x, tx, ffactor, maxdev)
ny = len_discretized_values(y, ty, ffactor, maxdev)
if nx * ny > 0: e = e / np.log(nx * ny)
return e
def discrete_conditional_entropy(x, tx, y, ty):
return discrete_joint_entropy(x, tx, y, ty) - discrete_entropy(y, ty)
def adjusted_mutual_information(x, tx, y, ty, ffactor=3, maxdev=3):
x, y = discretized_sequences(x, tx, y, ty, ffactor, maxdev)
try:
return adjusted_mutual_info_score(x, y)
except ValueError:
return adjusted_mutual_info_score(x.squeeze(1), y.squeeze(1))
def discrete_mutual_information(x, tx, y, ty):
ex = discrete_entropy(x, tx)
ey = discrete_entropy(y, ty)
exy = discrete_joint_entropy(x, tx, y, ty)
    # Mutual information is always positive; max() avoids negative values
    # due to numerical errors.
    mxy = max((ex + ey) - exy, 0)
return mxy
def normalized_discrete_entropy(x, tx, ffactor=3, maxdev=3):
e = discrete_entropy(x, tx, ffactor, maxdev)
n = len_discretized_values(x, tx, ffactor, maxdev)
if n > 0: e = e / np.log(n)
return e
# Continuous information measures
def to_numerical(x, y):
dx = defaultdict(lambda: np.zeros(2))
for i, a in enumerate(x):
dx[a][0] += y[i]
dx[a][1] += 1
for a in dx.keys():
dx[a][0] /= dx[a][1]
x = np.array([dx[a][0] for a in x], dtype=float)
return x
def normalize(x, tx):
if not numerical(tx): # reassign labels according to its frequency
try:
cx = Counter(x)
except TypeError:
cx = Counter(x.flat)
xmap = dict()
# nx = len(cx)
# center = nx/2 if (nx % 4) == 0 else (nx-1)//2
# for i, k in enumerate(cx.most_common()):
# offset = (i+1)//2
# if (i % 4) > 1: offset = -offset
# xmap[k[0]] = center + offset
for i, k in enumerate(cx.most_common()):
xmap[k[0]] = i
y = np.array([xmap[a] for a in x.flat], dtype=float)
else:
y = x
y = y - np.mean(y)
if np.std(y) > 0:
y = y / np.std(y)
return y
def normalized_entropy_baseline(x, tx):
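    # Spacing-based (Vasicek-type) differential entropy estimate with a
    # digamma correction, computed on the normalized sample.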
try:
if len(set(x)) < 2:
return 0
except TypeError:
if len(set(x.flat)) < 2:
return 0
x = normalize(x, tx)
xs = np.sort(x)
delta = xs[1:] - xs[:-1]
delta = delta[delta != 0]
hx = np.mean(np.log(delta))
hx += psi(len(delta))
hx -= psi(1)
return hx
def normalized_entropy(x, tx, m=2):
x = normalize(x, tx)
try:
cx = Counter(x)
except TypeError:
cx = Counter(x.flat)
if len(cx) < 2:
return 0
xk = np.array(list(cx.keys()), dtype=float)
xk.sort()
delta = (xk[1:] - xk[:-1]) / m
counter = np.array([cx[i] for i in xk], dtype=float)
hx = np.sum(counter[1:] * np.log(delta / counter[1:])) / len(x)
hx += (psi(len(delta)) - np.log(len(delta)))
hx += np.log(len(x))
hx -= (psi(m) - np.log(m))
return hx
def igci(x, tx, y, ty):
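    # Slope-based IGCI (information-geometric causal inference) score; the
    # sign of the asymmetry hints at the causal direction.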
try:
if len(set(x)) < 2:
return 0
except TypeError:
if len(set(x.flat)) < 2:
return 0
x = normalize(x, tx)
y = normalize(y, ty)
if len(x) != len(set(x.flat)):
dx = defaultdict(lambda: np.zeros(2))
for i, a in enumerate(x.flat):
dx[a][0] += y[i]
dx[a][1] += 1
for a in dx.keys():
dx[a][0] /= dx[a][1]
xy = np.array(sorted([[a, dx[a][0]] for a in dx.keys()]), dtype=float)
counter = np.array([dx[a][1] for a in xy[:, 0]], dtype=float)
else:
xy = np.array(sorted(zip(x, y)), dtype=float)
counter = np.ones(len(x))
delta = xy[1:] - xy[:-1]
if len(delta.shape) > 2:
delta = delta.squeeze(2)
selec = delta[:, 1] != 0
delta = delta[selec]
counter = np.min([counter[1:], counter[:-1]], axis=0)
counter = counter[selec]
hxy = np.sum(counter * np.log(delta[:, 0] / np.abs(delta[:, 1]))) / len(x)
return hxy
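def _demo_igci_direction():
    # Hedged sketch (added for illustration, not part of the original
    # module): IGCI infers a causal direction by comparing hxy = igci(x, y)
    # with hyx = igci(y, x). NUMERICAL is assumed to be this module's
    # numerical type tag.
    rng = np.random.RandomState(0)
    x = rng.uniform(0.1, 1.0, 500)
    y = np.exp(x)  # deterministic monotone mechanism x -> y
    hxy = igci(x, NUMERICAL, y, NUMERICAL)
    hyx = igci(y, NUMERICAL, x, NUMERICAL)
    # which inequality indicates x -> y depends on the estimator's sign
    # convention; the comparison below is an assumption of this sketch
    return 'x->y' if hxy < hyx else 'y->x'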
def uniform_divergence(x, tx, m=2):
x = normalize(x, tx)
try:
cx = Counter(x)
except TypeError:
cx = Counter(x.flat)
xk = np.array(list(cx.keys()), dtype=float)
xk.sort()
delta = np.zeros(len(xk))
if len(xk) > 1:
delta[0] = xk[1] - xk[0]
delta[1:-1] = (xk[m:] - xk[:-m]) / m
delta[-1] = xk[-1] - xk[-2]
else:
delta = np.array(np.sqrt(12))
counter = np.array([cx[i] for i in xk], dtype=float)
delta = delta / np.sum(delta)
hx = np.sum(counter * np.log(counter / delta)) / len(x)
hx -= np.log(len(x))
hx += (psi(m) - np.log(m))
return hx
def normalized_skewness(x, tx):
y = normalize(x, tx)
return skew(y)
def normalized_kurtosis(x, tx):
y = normalize(x, tx)
return kurtosis(y)
def normalized_moment(x, tx, y, ty, n, m):
x = normalize(x, tx)
y = normalize(y, ty)
return np.mean((x ** n) * (y ** m))
def moment21(x, tx, y, ty):
return normalized_moment(x, tx, y, ty, 2, 1)
def moment22(x, tx, y, ty):
return normalized_moment(x, tx, y, ty, 2, 2)
def moment31(x, tx, y, ty):
return normalized_moment(x, tx, y, ty, 3, 1)
def fit(x, tx, y, ty):
if (not numerical(tx)) or (not numerical(ty)):
return 0
if (count_unique(x) <= 2) or (count_unique(y) <= 2):
return 0
x = (x - np.mean(x)) / np.std(x)
y = (y - np.mean(y)) / np.std(y)
if len(x.shape) > 1:
x = x.squeeze(1)
if len(y.shape) > 1:
y = y.squeeze(1)
xy1 = np.polyfit(x, y, 1)
xy2 = np.polyfit(x, y, 2)
return abs(2 * xy2[0]) + abs(xy2[1] - xy1[0])
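def _demo_fit_nonlinearity():
    # Hedged sketch (added for illustration, not part of the original
    # module): the `fit` feature compares quadratic and linear least-squares
    # fits; a value near zero indicates an (almost) linear relation.
    # NUMERICAL is assumed to be this module's numerical type tag.
    x = np.linspace(-1, 1, 200)
    y_lin = 2 * x + 0.1   # linear mechanism -> feature close to 0
    y_quad = x ** 2       # curved mechanism -> clearly positive feature
    return fit(x, NUMERICAL, y_lin, NUMERICAL), fit(x, NUMERICAL, y_quad, NUMERICAL)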
def fit_error(x, tx, y, ty, m=2):
if categorical(tx) and categorical(ty):
x = normalize(x, tx)
y = normalize(y, ty)
elif categorical(tx) and numerical(ty):
x = to_numerical(x, y)
elif numerical(tx) and categorical(ty):
y = to_numerical(y, x)
x = (x - np.mean(x)) / np.std(x)
y = (y - np.mean(y)) / np.std(y)
if len(x.shape) > 1:
x = x.squeeze(1)
if len(y.shape) > 1:
y = y.squeeze(1)
if (count_unique(x) <= m) or (count_unique(y) <= m):
xy = np.polyfit(x, y, min(count_unique(x), count_unique(y)) - 1)
else:
xy = np.polyfit(x, y, m)
return np.std(y - np.polyval(xy, x))
def fit_noise_entropy(x, tx, y, ty, ffactor=3, maxdev=3, minc=10):
x, y = discretized_sequences(x, tx, y, ty, ffactor, maxdev)
try:
cx = Counter(x)
except TypeError:
cx = Counter(x.flat)
entyx = []
for a in cx:
if cx[a] > minc:
entyx.append(discrete_entropy(y[x == a], CATEGORICAL))
if len(entyx) == 0: return 0
n = len_discretized_values(y, ty, ffactor, maxdev)
return np.std(entyx) / np.log(n)
def fit_noise_skewness(x, tx, y, ty, ffactor=3, maxdev=3, minc=8):
xd, yd = discretized_sequences(x, tx, y, ty, ffactor, maxdev)
try:
cx = Counter(xd)
except TypeError:
cx = Counter(xd.flat)
skewyx = []
for a in cx:
if cx[a] >= minc:
skewyx.append(normalized_skewness(y[xd == a], ty))
if len(skewyx) == 0: return 0
return np.std(skewyx)
def fit_noise_kurtosis(x, tx, y, ty, ffactor=3, maxdev=3, minc=8):
xd, yd = discretized_sequences(x, tx, y, ty, ffactor, maxdev)
try:
cx = Counter(xd)
except TypeError:
cx = Counter(xd.flat)
kurtyx = []
for a in cx:
if cx[a] >= minc:
kurtyx.append(normalized_kurtosis(y[xd == a], ty))
if len(kurtyx) == 0: return 0
return np.std(kurtyx)
def conditional_distribution_similarity(x, tx, y, ty, ffactor=2, maxdev=3, minc=12):
xd, yd = discretized_sequences(x, tx, y, ty, ffactor, maxdev)
try:
cx = Counter(xd)
cy = Counter(yd)
except TypeError:
cx = Counter(xd.flat)
cy = Counter(yd.flat)
yrange = sorted(cy.keys())
ny = len(yrange)
py = np.array([cy[i] for i in yrange], dtype=float)
py = py / py.sum()
pyx = []
for a in cx:
if cx[a] > minc:
yx = y[xd == a]
if not numerical(ty):
cyx = Counter(yx)
pyxa = np.array([cyx[i] for i in yrange], dtype=float)
pyxa.sort()
elif count_unique(y) > len_discretized_values(y, ty, ffactor, maxdev):
yx = (yx - np.mean(yx)) / np.std(y)
yx = discretized_sequence(yx, ty, ffactor, maxdev, norm=False)
cyx = Counter(yx.astype(int))
pyxa = np.array([cyx[i] for i in discretized_values(y, ty, ffactor, maxdev)], dtype=float)
            else:
                cyx = Counter(yx)
                pyxa = [cyx[i] for i in yrange]
                # the alignment below belongs to this branch only: the list
                # concatenations would fail on the ndarray pyxa produced by
                # the branches above
                pyxax = np.array([0] * (ny - 1) + pyxa + [0] * (ny - 1), dtype=float)
                xcorr = [sum(py * pyxax[i:i + ny]) for i in range(2 * ny - 1)]
                imax = xcorr.index(max(xcorr))
                pyxa = np.array([0] * (2 * ny - 2 - imax) + pyxa + [0] * imax, dtype=float)
assert pyxa.sum() == cx[a]
pyxa = pyxa / pyxa.sum()
pyx.append(pyxa)
if len(pyx) == 0: return 0
    pyx = np.array(pyx)
    pyx = pyx - pyx.mean(axis=0)
    return np.std(pyx)
import numpy as np
from typing import List, Callable, Union, Optional, Any
from scipy.special import digamma
import lmfit as lm
import pandas as pd
from scipy.signal import savgol_filter
import logging
from ... import core_util as CU
from . import dat_attribute as DA
logger = logging.getLogger(__name__)
FIT_NUM_BINS = 1000
def default_transition_params():
_pars = lm.Parameters()
_pars.add_many(
('mid', 0, True, None, None, None, None),
('theta', 20, True, 0.01, None, None, None),
('amp', 1, True, 0, None, None, None),
('lin', 0, True, 0, None, None, None),
('const', 5, True, None, None, None, None))
return _pars
def i_sense(x, mid, theta, amp, lin, const):
""" fit to sensor current """
arg = (x - mid) / (2 * theta)
return -amp / 2 * np.tanh(arg) + lin * (x - mid) + const
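def _demo_i_sense_fit():
    # Hedged usage sketch (added for illustration, not part of the original
    # module): fits synthetic charge-sensor data with the i_sense model via
    # lmfit's Model interface. The true parameter values and noise level
    # below are arbitrary assumptions.
    x = np.linspace(-100, 100, 201)
    true = dict(mid=5, theta=15, amp=1.2, lin=0.001, const=5)
    data = i_sense(x, **true) + np.random.normal(0, 0.02, x.size)
    result = lm.Model(i_sense).fit(data, x=x, params=default_transition_params())
    return result.best_values  # should be close to `true`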
def i_sense_strong(x, mid, theta, amp, lin, const):
arg = (x - mid) / theta
return (-amp * np.arctan(arg) / np.pi) * 2 + lin * (x - mid) + const
def func_no_nan_eval(x: Any, func: Callable):
"""Removes nans BEFORE calling function. Necessary for things like scipy.digamma which is EXTREMELY slow with
np.nans present in input
Returns similar input (i.e. list if list entered, array if array entered, float if float or int entered)
"""
if np.sum(np.isnan(np.asanyarray(x))) == 0:
return func(x)
t = type(x)
x = np.array(x, ndmin=1)
no_nans = np.where(~np.isnan(x))
arr = np.zeros(x.shape)
arr[np.where(np.isnan(x))] = np.nan
arr[no_nans] = func(x[no_nans])
if t != np.ndarray: # Return back to original type
if t == int:
arr = float(arr)
else:
arr = t(arr)
return arr
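def _demo_func_no_nan_eval():
    # Hedged sketch (added for illustration, not part of the original
    # module): digamma is evaluated only on the non-NaN entries and the
    # NaNs are reinserted in place, which is what keeps the digamma-based
    # fits below fast on partially missing sweeps.
    x = np.array([0.5, np.nan, 2.0])
    return func_no_nan_eval(x, digamma)  # NaN preserved at index 1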
def i_sense_digamma(x, mid, g, theta, amp, lin, const):
def func_no_nans(x_no_nans):
arg = digamma(0.5 + (x_no_nans - mid + 1j * g) / (2 * np.pi * 1j * theta)) # j is imaginary i
return amp * (0.5 + np.imag(arg) / np.pi) + lin * (
x_no_nans - mid) + const - amp / 2 # -amp/2 so const term coincides with i_sense
return func_no_nan_eval(x, func_no_nans)
def i_sense_digamma_quad(x, mid, g, theta, amp, lin, const, quad):
def func_no_nans(x_no_nans):
arg = digamma(0.5 + (x_no_nans - mid + 1j * g) / (2 * np.pi * 1j * theta)) # j is imaginary i
return amp * (0.5 + np.imag(arg) / np.pi) + quad * (x_no_nans - mid) ** 2 + lin * (
x_no_nans - mid) + const - amp / 2 # -amp/2 so const term coincides with i_sense
return func_no_nan_eval(x, func_no_nans)
def i_sense_digamma_amplin(x, mid, g, theta, amp, lin, const, amplin):
def func_no_nans(x_):
arg = digamma(0.5 + (x_ - mid + 1j * g) / (2 * np.pi * 1j * theta)) # j is imaginary i
return (amp + amplin * x_) * (0.5 + np.imag(arg) / np.pi) + lin * (
x_ - mid) + const - (amp + amplin * mid) / 2 # -amp/2 so const term coincides with i_sense
return func_no_nan_eval(x, func_no_nans)
class Transition(DA.FittingAttribute):
version = '2.0.0'
group_name = 'Transition'
description = 'Fitting to charge transition (measured by charge sensor qpc). Expects data with name "i_sense"'
DEFAULT_DATA_NAME = 'i_sense'
def default_data_names(self) -> List[str]:
return ['x', 'i_sense']
def clear_caches(self):
super().clear_caches()
def get_centers(self) -> List[float]:
logger.info(f'Dat{self.dat.datnum}: Starting Transition Center Fits')
return [fit.best_values.mid for fit in self.row_fits]
def get_default_params(self, x: Optional[np.ndarray] = None,
data: Optional[np.ndarray] = None) -> Union[List[lm.Parameters], lm.Parameters]:
if x is not None and data is not None:
params = get_param_estimates(x, data)
if len(params) == 1:
params = params[0]
return params
else:
return default_transition_params()
def get_default_func(self) -> Callable[[Any], float]:
return i_sense
def initialize_additional_FittingAttribute_minimum(self):
pass
# class OldTransitions(DA.FittingAttribute):
# version = '1.1'
# group_name = 'Transition'
#
# """
# Versions:
# 1.1 -- 20-7-20: Changed averaging to use center values not IDs. Better way of centering data
# """
#
# def __init__(self, dat):
# super().__init__(dat)
# # Below set in super()
# # self.x = None
# # self.y = None
# # self.data = None
# # self.avg_data = None
# # self.avg_data_err = None
# # self.fit_func = None
# # self.all_fits = None # type: Union[List[DHU.FitInfo], None]
# # self.avg_fit = None # type: Union[DHU.FitInfo, None]
# #
# # self.get_from_HDF()
#
# def get_from_HDF(self):
# super().get_from_HDF() # Gets self.x/y/avg_fit/all_fits
# tdg = self.group.get('Data', None)
# if tdg is not None:
# self.data = tdg.get('i_sense', None)
# self.avg_data = tdg.get('avg_i_sense', None)
# self.avg_data_err = tdg.get('avg_i_sense_err', None)
#
# def update_HDF(self):
# super().update_HDF()
#
# def _set_data_hdf(self, **kwargs):
# super()._set_data_hdf(data_name='i_sense')
#
# def run_row_fits(self, params=None, fit_func=None, auto_bin=True):
# params = super().run_row_fits(params=params) # checks data and checks tries getting params from avg_fit if None
#
# # Have to override fitting here because Transition fit has 'func' arg
# x = self.x[:]
# data = self.data[:]
# self.fit_func = fit_func if fit_func is not None else self.fit_func
# row_fits = transition_fits(x, data, params=params, func=self.fit_func, auto_bin=auto_bin)
# fit_infos = [dat_analysis.dat_object.Attributes.DatAttribute.FitInfo() for _ in row_fits]
# for fi, rf in zip(fit_infos, row_fits):
# fi.init_from_fit(rf)
# self.all_fits = fit_infos
# self._set_row_fits_hdf()
#
# def _set_row_fits_hdf(self):
# """Save fit_info per row to HDF"""
# super()._set_row_fits_hdf()
#
# def set_avg_data(self, *args, **kwargs):
# centers = np.array([f.best_values.mid for f in self.all_fits])
# super().set_avg_data(centers) # Sets self.avg_data, self.avg_data_err and saves to HDF
#
# def _set_avg_data_hdf(self):
# dg = self.group['Data']
# if self.avg_data is not None:
# for key in ('avg_i_sense', 'avg_i_sense_err'):
# if dg.get(key, None) is not None:
# logger.info(f'Overwriting {key} in {dg.name}')
# del dg[key]
# dg['avg_i_sense'] = self.avg_data
# dg['avg_i_sense_err'] = self.avg_data_err
#
# def run_avg_fit(self, params=None, fit_func=None, auto_bin=True):
# params = super().run_avg_fit(params=params)
# self.fit_func = fit_func if fit_func is not None else self.fit_func
#
# x = self.x[:]
# data = self.avg_data[:]
# fit = transition_fits(x, data, params=params, func=self.fit_func, auto_bin=auto_bin)[0]
# fit_info = dat_analysis.dat_object.Attributes.DatAttribute.FitInfo()
# fit_info.init_from_fit(fit)
# self.avg_fit = fit_info
# self._set_avg_fit_hdf()
#
# def _set_avg_fit_hdf(self):
# super()._set_avg_fit_hdf()
#
# def _check_default_group_attrs(self):
# super()._check_default_group_attrs()
#
#
#
# class Transition(object):
# version = '4.0' # To keep track of whether fitting has changed
# """
# Version Changes:
# 1.3 -- Added T.g and T.fit_values.gs for digamma_fit
# 2.0 -- Added _avg_full_fit and avg_fit_values
# 2.1 -- Omitting NaNs
# 2.2 -- Change i_sense function to have amp/2 so that it fits with di_gamma function
# 2.3 -- Recalculate average values which show up in datdf after refitting data
# 2.4 -- self.fit_func defaults to 'i_sense' instead of 'None' now.
# 3.0 -- added i_sense_digamma_quad 28/4/20.
# Also changed i_sense_digamma linear part to be (x-mid) instead of just x. Will affect previous dats
# 3.1 -- added self.fit_func_name which should get stored in datdf
# 4.0 -- digamma function changed! Change g/2 to g only in order to align with Yigal and others
# """
#
# def __init__(self, x_array, transition_data, fit_function=None):
# """Defaults to fitting with cosh shape transition"""
# if fit_function is None:
# fit_function = i_sense
# self._data = np.array(transition_data)
# self._avg_data = None # Initialized in avg_full_fit
# self._x_array = x_array
# self.version = Transition.version
# self._full_fits = transition_fits(x_array, self._data, get_param_estimates(x_array, self._data), func=fit_function)
# self.fit_func = fit_function
# self.fit_func_name = fit_function.__name__
# self._avg_full_fit = self.avg_transition_fits()
#
# # Mostly just for convenience when working in console
# self.mid = None # type: Union[float, None]
# self.theta = None # type: Union[float, None]
# self.amp = None # type: Union[float, None]
# self.lin = None # type: Union[float, None]
# self.const = None # type: Union[float, None]
# self.g = None # type: Union[float, None]
# self.set_average_fit_values()
#
# @property
# def init_params(self):
# return[fit.init_params for fit in self._full_fits]
#
# @property
# def params(self):
# return [fit.params for fit in self._full_fits]
#
# @property
# def avg_params(self):
# return self._avg_full_fit.params
#
# @property
# def fit_values(self):
# return self.get_fit_values()
#
# @property
# def avg_fit_values(self):
# return self.get_fit_values(avg=True)
#
# @property
# def avg_x_array(self):
# return self._avg_full_fit.userkws['x']
#
# def avg_transition_fits(self):
# """Fits to averaged data (based on middle of individual fits and using full_fit[0] params)"""
# self._avg_data, self._avg_data_err = CU.average_data(self._data, [CU.get_data_index(self._x_array, f.best_values['mid']) for f in self._full_fits])
# return transition_fits(self._x_array, self._avg_data, [self._full_fits[0].params], func=self.fit_func)[0]
#
# def recalculate_fits(self, params=None, func=None):
# """Method to recalculate fits using new parameters or new fit_function"""
# if params is None:
# params = self.params
# if func is None and self.fit_func is not None:
# func = self.fit_func
# print(f'Using self.fit_func as func to recalculate with: [{self.fit_func.__name__}]')
# elif func is None:
# func = i_sense
# print(f'Using standard i_sense as func to recalculate with')
# else:
# pass
#
# self._full_fits = transition_fits(self._x_array, self._data, params, func=func)
# self._avg_data, _ = CU.average_data(self._data, [CU.get_data_index(self._x_array, f.best_values['mid']) for f in self._full_fits])
# self._avg_full_fit = transition_fits(self._x_array, self._avg_data, [self._full_fits[0].params], func=func)[0]
# self.fit_func = func
# self.fit_func_name = func.__name__
# self.set_average_fit_values()
# self.version = Transition.version
#
# def set_average_fit_values(self):
# if self.fit_values is not None:
# for i, key in enumerate(self.fit_values._fields):
# if self.fit_values[i] is None:
# avg = None
# else:
# avg = np.average(self.fit_values[i])
# exec(f'self.{key[:-1]} = {avg}') # Keys in fit_values should all end in 's'
#
# def get_fit_values(self, avg=False) -> Union[NamedTuple, None]:
# """Takes values from param fits and puts them in NamedTuple"""
# if avg is False:
# params = self.params
# elif avg is True:
# params = [self.avg_params] # Just to make it work with same code below, but totally overkill for avg_values
# else:
# params = None
# if params is not None:
# data = {k+'s': [param[k].value for param in params] for k in params[0].keys()} # makes dict of all
# # param values for each key name. e.g. {'mids': [1,2,3], 'thetas':...}
# return dat_analysis.Builders.Util.data_to_NamedTuple(data, FitValues)
# else:
# return None
#
# def plot_transition1d(self, y_array, yval, ax=None, s=10, transx=0, transy=0, yisindex=0, notext=0):
# if yisindex == 0:
# ylist = y_array
# idy, yval = min(enumerate(ylist), key=lambda x: abs(x[1] - yval)) # Gets the position of the
# # y value closest to yval, and records actual yval
# else:
# idy = yval
# yval = y_array[idy]
# x = self._x_array
# y = self._data[idy]
# ax.scatter(x, y, s=s)
# ax.plot(x, self._full_fits[idy].best_fit, 'r-')
# ax.plot(x, self._full_fits[idy].init_fit, 'b--')
# if notext == 0:
# ax.set_ylabel("i_sense /nA")
# ax.set_xlabel("Plunger /mV")
# return ax
# else:
# return ax, idy
#
#
# class FitValues(NamedTuple):
# mids: List[float]
# thetas: List[float]
# amps: List[float]
# lins: List[float]
# consts: List[float]
# gs: List[float]
#
#
#
def get_param_estimates(x, data: np.array):
"""Return list of estimates of params for each row of data for a charge Transition"""
if data.ndim == 1:
return _get_param_estimates_1d(x, data)
elif data.ndim == 2:
return [_get_param_estimates_1d(x, z) for z in data]
else:
raise NotImplementedError(f"data ndim = {data.ndim}: data shape must be 1D or 2D")
def _get_param_estimates_1d(x, z: np.array) -> lm.Parameters:
"""Returns lm.Parameters for x, z data"""
assert z.ndim == 1
z, x = CU.resample_data(z, x, max_num_pnts=500)
params = lm.Parameters()
s = pd.Series(z) # Put into Pandas series so I can work with NaN's more easily
sx = pd.Series(x, index=s.index)
z = s[s.first_valid_index():s.last_valid_index() + 1] # type: pd.Series
x = sx[s.first_valid_index():s.last_valid_index() + 1]
if np.count_nonzero(~np.isnan(z)) > 10: # Prevent trying to work on rows with not enough data
try:
smooth_gradient = np.gradient(savgol_filter(x=z, window_length=int(len(z) / 20) * 2 + 1, polyorder=2,
mode='interp')) # window has to be odd
except np.linalg.linalg.LinAlgError: # Came across this error on 9/9/20 -- Weirdly works second time...
logger.warning('LinAlgError encountered, retrying')
smooth_gradient = np.gradient(savgol_filter(x=z, window_length=int(len(z) / 20) * 2 + 1, polyorder=2,
mode='interp')) # window has to be odd
x0i = np.nanargmin(smooth_gradient) # Index of steepest descent in data
mid = x.iloc[x0i] # X value of guessed middle index
        amp = np.nanmax(z) - np.nanmin(z)
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 4 11:07:16 2021
@author: arnep
"""
from scipy import optimize
import numpy as np
import json
from scipy.stats import norm
def llik_gaussian(v, F):
"""
Log likelihood function of a Gaussian model, based on the prediction error
decomposition.
Parameters
----------
v : array-like
Prediction errors. 1D or 2D.
F : array-like
Prediction error variance. 2D or 3D array.
Returns
-------
    llik : float
        Negative log likelihood.
"""
#set all elements which are nan to zero
v_temp = v.copy()
v_temp[np.isnan(v_temp)] = 0
#get length of the error terms
T = len(v)
#compute the sum of vt*Ft^-1*vt' for all t in T
accum = 0
for t in range(T):
for i in range(v.shape[1]):
accum += v_temp[t,i]*np.linalg.inv(np.matrix(F[t,i]))*v_temp[t,i].transpose()
#log likelihood function: -n/2 * log(2*pi) - 1/2*sum(log(F_t) + v_t^2/F_t)
l = -(T / 2) * np.log(2 * np.pi) - (1 / 2) * (np.log(np.linalg.det(F)).sum()) - (1 / 2) * (
accum)
#use of mean (sum would also be possible, but in the optimisation this would amount to the same)
llik = -np.mean(l) #negative as minimizer function is used
return llik
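def _demo_llik_gaussian():
    # Hedged usage sketch (added for illustration, not part of the original
    # module): evaluates the Gaussian prediction-error log likelihood on toy
    # 1-D errors. v has shape (T, 1) and F shape (T, 1, 1), matching the
    # Kalman filter output further below.
    T = 50
    v = np.random.RandomState(0).normal(0, 1, (T, 1))
    F = np.ones((T, 1, 1))
    return llik_gaussian(v, F)  # scalar negative log likelihood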
def ml_estimator_matrix( y, matr, param_loc, optim_fun, filter_init, param_init,
bnds, covariance_loc = [], method = 'L-BFGS-B',
options = {'eps': 1e-07,'disp': True,'maxiter': 200}, **llik_kwargs):
"""
MLE estimator which optimises the likelihood function given, based on
initialisation of both the filter and the parameters, bounds, and
a method.
Parameters
----------
y : array-like
Observations used for the fitting of a model.
matr : dict. System matrices of the state space model.
param_loc : dict
Locations of the parameters to be optimised in their
respective matrices.
    optim_fun : function
        Log likelihood function to be optimised.
filter_init : tuple
Initialisation of the Kalman filter.
param_init : tuple
Initialisation of the parameters.
bnds : tuple
Bounds for the parameters in the optimisation.
method : string, optional
method used for the optimisation of the likelihood function.
The default is 'L-BFGS-B'.
options : dict, optional
Options for the optimize.minimize function.
The default is {'eps': 1e-07,'disp': True,'maxiter': 200}.
**llik_kwargs : further arguments for the log likelihood function.
Returns
-------
results : dict
Output of optimize.minimize function
"""
#make object with all arguments together
if llik_kwargs:
args = (y, matr, param_loc, filter_init, covariance_loc, llik_kwargs)
else:
args = (y, matr, param_loc, filter_init, covariance_loc)
#optimize log_likelihood
results = optimize.minimize(optim_fun, param_init,
options=options, args = args,
method=method, bounds=bnds)
#print the parameters and the AIC
estimates = results.x
results['llik'] = -results.fun
    results['AIC'] = 2*len(param_init) - 2*results['llik']  # AIC = 2k - 2*log L
print('params: ' + str(estimates))
print('likelihood: ' +str( results['llik'] ))
print('AIC: ' +str(results['AIC']))
return results
### implement fully!!!!
def dim_check(T, R, Z, Q, H, c, d):
"""
Returns true if the dimensions are okay
Parameters
----------
T : array-like
System matrix T.
R : array-like
System matrix R.
Z : array-like
System matrix Z.
Q : array-like
System matrix Q.
H : array-like
System matrix H.
c : array-like
System matrix c.
d : array-like
System matrix d.
Returns
-------
bool
returns True if dimensions are appropriate and False otherwise.
"""
    # placeholder: dimension checks not implemented yet (see note above)
    return True
def collect_3d(dict_syst_matr):
"""
    Takes a dict of system matrices and returns a list of the names of the
    matrices which are 3D. Used in the Kalman recursions so that the same
    functions work on time-varying as well as constant system matrices.
Parameters
----------
dict_syst_matr : dict
dict with all system matrices.
Returns
-------
list_3d : list
list of system matrices in 3D.
"""
list_3d = list()
for key in dict_syst_matr.keys():
if len(dict_syst_matr[key].shape) >2:
list_3d.append(key)
return list_3d
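def _demo_collect_3d():
    # Hedged usage sketch (added for illustration, not part of the original
    # module): collect_3d flags which system matrices are time-varying (3D).
    # Here only 'T' varies, over 10 time steps.
    matrices = {'T': np.zeros((2, 2, 10)), 'Z': np.ones((1, 2))}
    return collect_3d(matrices)  # -> ['T']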
def convert_matrix(*args):
    """
    Convert arrays to matrices
    Parameters
    ----------
    *args : list
        arrays to be converted to matrices.
    Returns
    -------
    args : tuple of np.matrix
        Arrays converted to matrices.
    """
    # rebinding the loop variable would leave the inputs untouched, so
    # build and return a new tuple instead
    return tuple(np.matrix(el) for el in args)
class state_spacer():
def __init__(self, *matrices):
"""
Implementation of the following model:
        yt = c + Zt alphat + epst, epst ~ NID(0,H)
        alphat+1 = d + Tt alphat + Rt etat, etat ~ NID(0,Q)
define time-varying structural matrices in dimension (row, column, time)
Parameters
----------
*matrices : dict
System matrices of the state space model.
Returns
-------
None.
"""
self.init_matrices(*matrices)
self.fit_parameters = {}
self.fit_results = {}
self.fitted = False
def init_matrices(self, T=None, R=None, Z=None, Q=None, H=None,
c=None, d=None, y= None, y_dim =1, states = 1, eta_size = 1):
"""
Sets the initial system matrices. When no matrices are specified, default
        initial system matrices are set. It is also possible to determine the
        size of the error terms; the default matrices will then be set according
to the dimensions specified.
Parameters
----------
T : array-like, optional
T system matrix. Can be 1D, 2D or 3D. If not filled in, T is an
np.eye matrix with the dimensions equalling the number of states.
The default is None.
R : array-like, optional
R system matrix. Can be 1D, 2D or 3D. If not filled in, R is a
matrix with ones with the dimensions the states and the number of
eta error terms.
The default is None.
Z : array-like, optional
Z system matrix. Can be 1D, 2D or 3D. If not filled in, Z is a
matrix with ones with the dimensions the number of time series in y
and the number of states.
The default is None.
Q : array-like, optional
Q system matrix. Can be 1D, 2D or 3D. If not filled in, Q is an
eye matrix with the dimensions the number of eta error terms.
The default is None.
H : array-like, optional
H system matrix. Can be 1D, 2D or 3D. If not filled in, H is an
eye matrix with the dimensions the number of epsilon error terms.
The default is None.
c : array-like, optional
c system matrix. Can be 1D, 2D or 3D. If not filled in, c is an
vector with the dimensions the number of time series in y.
The default is None.
d : array-like, optional
d system matrix. Can be 1D, 2D or 3D. If not filled in, d is a
vector with the dimensions the number of states.
The default is None.
y : array-like, optional
Data. When added, this allows the function to correctly specify
the system matrices dimensions. Specifying explicit matrices
may (partially) override the information provided here.
The default is None.
y_dim : integer, optional
Number of time series in y. Instead of adding the data, this number
can be added, which allows the function to correctly specify the
system matrix dimensions. Specifying explicit matrices
may (partially) override the information provided here.
The default is 1.
states : integer, optional
Number of states desired in the state space model. Specifying
explicit matrices may (partially) override the information provided
here.The default is 1.
        eta_size : integer, optional
            Number of eta terms to be added in the state space model. Specifying
explicit matrices may (partially) override the information provided
here.
The default is 1.
Returns
-------
None.
"""
#check to see if the matrices given have valid dimensions
if dim_check(T, R, Z, Q, H, c, d):
self.matr = {}
if T is None:
self.matr['T'] = np.eye((states))
else:
self.matr['T'] = np.array(T)
self.matr['T'] = self.matr['T'].astype(float)
if R is None:
self.matr['R'] = np.ones((states, eta_size))
else:
self.matr['R'] = np.array(R)
self.matr['R'] = self.matr['R'].astype(float)
if Z is None:
if y is not None:
self.matr['Z'] = np.ones((y.shape[1], states))
else:
self.matr['Z'] = np.ones((y_dim, states))
else:
self.matr['Z'] = np.array(Z)
self.matr['Z'] = self.matr['Z'].astype(float)
if Q is None:
self.matr['Q'] = np.eye(eta_size)
else:
self.matr['Q'] = np.array(Q)
self.matr['Q'] = self.matr['Q'].astype(float)
if H is None:
if y is not None:
self.matr['H'] = np.eye(y.shape[1])
else:
self.matr['H'] = np.eye(y_dim)
else:
self.matr['H'] = np.array(H)
self.matr['H'] = self.matr['H'].astype(float)
if c is None:
if y is not None:
self.matr['c'] = np.zeros((y.shape[1], 1))
else:
self.matr['c'] = np.zeros((y_dim, 1))
else:
self.matr['c'] = np.array(c)
self.matr['c'] = self.matr['c'].astype(float)
if d is None:
self.matr['d'] = np.zeros((self.matr['T'].shape[0],1))
else:
self.matr['d'] = np.array(d)
self.matr['d'] = self.matr['d'].astype(float)
self.list_3d = collect_3d(self.matr)
else:
print("error: dimensions don't match")
def get_matrices(self, syst_matr):
"""
        Helper function. It checks which matrices are 3D, collects these in
        a list, and ensures that every 2D matrix is wrapped in a np.matrix
        object.
Parameters
----------
syst_matr : dict
Dict containing the system matrices.
Returns
-------
syst_matr : dict
Dict where all 2D matrices are in a np.matrix() object.
list_3d : list
list of 3D matrices.
"""
#get list of the matrices in 3D
list_3d = collect_3d(syst_matr)
#ensure the 2D matrices are in a np.matrix() object
for el in filter(lambda el: el not in list_3d, syst_matr.keys()):
syst_matr[el] = np.matrix(syst_matr[el])
return syst_matr, list_3d
def get_syst_matrices(self, list_3d, t, matrices):
"""
Function which unpacks the dict with all the system matrices for time
t so that this can be conveniently used in the recursions.
Parameters
----------
list_3d : list
List of matrices which are in 3D.
t : integer
time t.
matrices : dict
Dict of matrices.
Returns
-------
T : np.matrix()
System matrix Tt.
R : np.matrix()
System matrix Rt.
Z : np.matrix()
System matrix Zt.
Q : np.matrix()
System matrix Qt.
H : np.matrix()
System matrix Ht.
c : np.matrix()
System matrix ct.
d : np.matrix()
System matrix dt.
"""
        #get the dict with the system matrices of time t
matr = self.transit_syst_matrix(list_3d, t, matrices.copy())
#return this unpacked
return matr['T'], matr['R'], matr['Z'], matr['Q'], matr['H'], matr['c'], matr['d']
def transit_syst_matrix(self, list_trans, t, matr):
"""
For the 3D system matrices, the matrix of time t is obtained and put in
a np.matrix object.
Parameters
----------
list_trans : list
List of transition matrices which are in 3D.
t : integer
Time t, for the system matrices in 3D.
matr : dict
System matrices (where some are 3D).
Returns
-------
matr : dict
System matrices, where the relevant 2D matrix is chosen.
"""
for el in list_trans:
matr[el] = np.matrix(matr[el][:,:,t])
return matr
def kalman_init(self,y, filter_init, time):
"""
        Helper function which defines all the necessary output arrays and
        initialises them.
Parameters
----------
y : array-like
Observations data.
filter_init : tuple
Initialisation of the Kalman filter.
time : integer
number of Kalman iterations to be done.
Returns
-------
at : array-like
empty array for at.
Pt : array-like
empty array for Pt.
a : array-like
empty array for a.
P : array-like
empty array for P.
F : array-like
empty array for F.
K : array-like
empty array for K.
v : array-like
empty array for v.
"""
#get initialisation of the filter
a_init = np.matrix(filter_init[0])
P_init = np.matrix(filter_init[1])
#create empty arrays
at = np.zeros((time, a_init.shape[0], a_init.shape[1]))
Pt = np.zeros((time, P_init.shape[0], P_init.shape[1]))
a = np.zeros((time + 1, a_init.shape[0], a_init.shape[1]))
P = np.zeros((time + 1, P_init.shape[0], P_init.shape[1]))
F = np.zeros((time , y.shape[1], y.shape[1]))
K = np.zeros((time , a_init.shape[1], y.shape[1]))
v = np.zeros((time , y.shape[1]))
#fill first element with the initialisation
a[0,:] = a_init
P[0,:] = P_init
return at, Pt, a, P, F, K, v
def kalman_filter_iteration(self, yt, a, P, Z, T, c, d, H, Q, R,
v, F, att, Ptt, tol =1e7 ):
"""
Single iteration of the Kalman filter.
        v_t = y_t - Z_t*a_t - c_t
        F_t = Z_t*P_t*Z_t' + H_t
        K_t = T_t*P_t*Z_t'*F_t^-1
        a_{t+1} = T_t*a_t + K_t*v_t + d_t
        P_{t+1} = T_t*P_t*T_t' + R_t*Q_t*R_t' - K_t*F_t*K_t'
Parameters
----------
yt : int or array-like
Observation data at time t.
a : int or array-like
State prediction for time t.
P : int or array-like
Variance of state prediction for time t.
Z : array-like
System matrix Zt.
T : array-like
System matrix Tt.
c : array-like
System matrix ct.
d : array-like
System matrix dt.
H : array-like
System matrix Ht.
Q : array-like
System matrix Qt.
R : array-like
System matrix Rt.
v : int or array-like
Previous prediction error.
F : int or array-like
Previous prediction error variance.
att : int or array-like
Previous filtered state (t-1).
Ptt : int or array-like
Previous filtered state variance (t-1).
Returns
-------
v : int or array-like
New prediction error.
F : int or array-like
New prediction error variance.
K : int or array-like
New K.
att : int or array-like
New filtered state (time t).
Ptt : int or array-like
New filtered state variance (time t).
at1 : int or array-like
New state prediction for t + 1.
Pt1 : int or array-like
Variance of state prediction for t + 1.
c : array-like
Just c, no transformation happens in normal Kalman filter.
d : array-like
Just d, no transformation happens in normal Kalman filter.
"""
if not np.isnan(yt):
#v and a are transposed
v = yt -a*Z.transpose() - c.transpose()
#F, P and K are not transposed
F = Z*P*Z.transpose() + H
M = P*Z.transpose()*np.linalg.inv(F)
K = T*M
att = a + v*M.transpose()
Ptt = P - M*F*M.transpose()
at1 = a*T.transpose() + v*K.transpose() + d.transpose()
Pt1 = T*P*T.transpose() + R*Q*R.transpose() - K*F*K.transpose()
else:
#v and a are transposed
v = yt*np.nan
#F, P and K are not transposed
F = np.matrix(np.ones(H.shape)*tol)
M = np.matrix(np.zeros((P*Z.transpose()*np.linalg.inv(F)).shape))
K = T*M
att = a
Ptt = P
at1 = a*T.transpose() + d.transpose()
Pt1 = T*P*T.transpose() + R*Q*R.transpose()
return v, F, K, att, Ptt, at1, Pt1, c, d
def create_empty_objs(self, yt, H, at, Pt):
"""
Helper function to create certain empty objects, which are later
used in the code.
Parameters
----------
yt : array-like
Observations.
H : array-like
H system matrix of time t.
at : array-like
array in the form of the filtered state.
Pt : array-like
array in the form of the filtered state variance.
Returns
-------
v_obj : array-like
empty v array.
F_obj : array-like
empty F array.
att_obj : array-like
empty att array.
Ptt_obj : array-like
empty Ptt array.
"""
v_obj = np.zeros(yt.shape)
F_obj = np.zeros(H.shape)
att_obj = np.zeros(at.shape)
Ptt_obj = np.zeros(Pt.shape)
return v_obj, F_obj, att_obj, Ptt_obj
def kalman_filter_base(self, y, filter_init, syst_matr):
"""
Kalman filter recursions, based on the system matrices and the initialisation
of the filter given. It first gets the processed matrices by calling
the helper functions, initialises the output arrays, and then
applies the filter.
Parameters
----------
y : array-like
Observation data.
filter_init : tuple
Initialisation of the filter.
syst_matr : dict
            Dictionary containing all the system matrices.
Returns
-------
at : array-like
Filtered states.
Pt : array-like
Filtered state variances.
a : array-like
Filtered state predictions.
P : array-like
Filtered state prediction variances.
v : array-like
Filtered prediction errors.
F : array-like
Filtered prediction error variances.
K : array-like
Filtered K (convenient result for other computations).
newC : array-like
same as c in the normal Kalman filter, given for coherence with other
methods.
newD : array-like
same as d in the normal Kalman filter, given for coherence with other
methods.
"""
#get the length of the array
time = len(y)
#convert system arrays to matrices, and get the 3D system matrices
matrices, list_3d = self.get_matrices(syst_matr)
#initialise the Kalman filter
at, Pt, a, P, F, K, v = self.kalman_init(y, filter_init, time)
#get the system matrices belonging to the first observation
t = 0
T, R, Z, Q, H, c, d = self.get_syst_matrices(list_3d, t, matrices)
#get an array in the shape of the first observation
yt = np.zeros(y[t].shape)
#initialise the arrays for new c and new d. Not used in the base filter
#only here for compability with more advanced filters
newC = np.zeros((self.matr['c'].shape[0], self.matr['c'].shape[1], time))
newD = np.zeros((self.matr['d'].shape[0], self.matr['d'].shape[1], time ))
#create empty objects for the results
v_obj, F_obj, att_obj, Ptt_obj = self.create_empty_objs(yt, H, a[t], P[t])
for t in range(time):
#get system matrices and the observation at time t
T, R, Z, Q, H, c, d = self.get_syst_matrices(list_3d, t, matrices)
yt = y[t]
filter_args = yt, a[t], P[t], Z, T, c, d, H, Q, R, v_obj, F_obj, att_obj, Ptt_obj
v[t], F[t], K[t], at[t], Pt[t], a[t+1], P[t+1], newC[:,:,t], newD[:,:,t] = self.kalman_filter_iteration(*filter_args)
return at, Pt, a, P, v, F, K, newC, newD
def kalman_filter(self, y, filter_init):
"""
Function which executes the Kalman filter base, and stores the results
in a dict, which is more convenient for final users.
Parameters
----------
y : array-like
Observation data.
filter_init : tuple
Initalisation of the filter.
Returns
-------
o : Dict
Filter output.
"""
o = {}
o["at"], o["Pt"], o["a"], o["P"], o["v"], o["F"], o["K"], o["newC"], o["newD"] = self.kalman_filter_base(y, filter_init, self.matr)
return o
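    def _demo_local_level_filter(self):
        """
        Hedged usage sketch (added for illustration, not part of the original
        class): runs the Kalman filter on a simulated local level model. All
        numbers below are illustrative assumptions, not defaults from the
        source.
        """
        rng = np.random.RandomState(0)
        n = 100
        alpha = np.cumsum(rng.normal(0, 0.5, n))            # random-walk state
        y = (alpha + rng.normal(0, 1.0, n)).reshape(-1, 1)  # noisy observations
        self.init_matrices(y=y, states=1, eta_size=1)       # default 1-state model
        filter_init = (np.zeros((1, 1)), np.eye(1) * 1e6)   # near-diffuse start
        return self.kalman_filter(y, filter_init)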
def smoothing_iteration(self, v, F, r, T, K, Z, N, P, a):
"""
Single smoothing iteration recursion when the observation is available.
        L_{t+1} = T_{t+1} - K_{t+1}*Z_{t+1}
        r_t = v_{t+1}*(F_{t+1}^-1)'*Z_{t+1} + r_{t+1}*L_{t+1}
        N_t = Z_{t+1}'*F_{t+1}^-1*Z_{t+1} + L_{t+1}'*N_{t+1}*L_{t+1}
        alpha_{t+1} = a_{t+1} + r_t*P_{t+1}'
        V_{t+1} = P_{t+1} - P_{t+1}*N_t*P_{t+1}
Parameters
----------
v : array-like
Prediction error (value of t+1).
F : array-like
Prediction error variance (value of t+1).
r : array-like
Intermediate result in the smoothing recursions (value of t+1).
T : array-like
System matrix T (value of t+1).
K : array-like
Intermediate result K in the filter recursions (value of t+1).
Z : array-like
System matrix Z (value of t+1).
N : array-like
Intermediate result N in the filter recursions (value of t+1).
P : array-like
State prediction variance (value of t+1).
a : array-like
State prediction (value of t+1).
Returns
-------
L : array-like
Intermediate result in the smoothing recursions (value of t).
r : array-like
Intermediate result in the smoothing recursions (value of t).
N : array-like
Intermediate result N in the filter recursions (value of t).
alpha : array-like
Smoothed state (value of t).
V : array-like
Smoothed state variance (value of t).
"""
if not np.isnan(v):
L = T - K*Z
r= v*np.linalg.inv(F).transpose()*Z + (r*L)
N = Z.transpose()*np.linalg.inv(F)*Z + L.transpose()*N*L
alpha = a + np.dot(r,P.transpose())
V = P - P*N*P
else:
L = T
r= r*L
N = L.transpose()*N*L
alpha = a + np.dot(r,P.transpose())
V = P - P*N*P
return L, r, N, alpha, V
def smoother_base(self, y, filter_init, return_smoothed_errors=True):
"""
        Kalman smoothing recursions, based on the system matrices and the
        initialisation of the filter given. It first gets the processed
        matrices by calling the helper functions and initialises the output
        arrays. It then runs the Kalman filter and uses its output in the
        smoothing recursions, with separate functions depending on whether
        the observation is missing or not. The smoothed errors are also
        computed if indicated.
Parameters
----------
y : array-like
Observation data.
filter_init : tuple
Initialisation of the Kalman filter.
return_smoothed_errors : boolean, optional
Indicates whether the smoothed errors also should be computed.
The default is True.
Returns
-------
Kalman smoothing output : several array-like items
Output of the Kalman smoother (at, Pt, a, P, v, F, K, newC, newD
the same as the output of the Kalman filter). Additionally also
alpha, V (smoothed state and smoothed state variance), and r and N
(intermediary results) are outputted.
Smoothed errors : several array-like items, optional
Output of the Kalman smoother. The output consists of u, D (intermediary
results), epsilon_hat, var_epsilon_cond, eta_hat, var_eta_cond
observation and state errors and their variances.
"""
#get state matrices
matrices, list_3d = self.get_matrices(self.matr)
#apply Kalman filter
at, Pt, a, P, v, F, K, newC, newD =self.kalman_filter_base(y, filter_init, self.matr)
#initialise output arrays
r = np.zeros((a.shape))
r[:] = np.nan
N = np.zeros((P.shape))
N[:] = np.nan
r[len(a)-2] = 0
N[len(a)-2] = 0
alpha = np.zeros(a.shape)
alpha[:] = np.nan
V = np.zeros(P.shape)
V[:] = np.nan
r, N, alpha, V = r[:len(r)-1], N[:len(N)-1], alpha[:len(alpha)-1], V[:len(V)-1]
#flow if there are missing observations
if np.isnan(np.sum(v)):
#loop over the observations backwards
for t in range(len(a)-3, -1,-1):
#get the matrices at time t+1
T, _, Z, _, _, _, _ = self.get_syst_matrices(list_3d, t+1, matrices.copy())
args = v[t+1], F[t+1], r[t+1], T, K[t+1], Z, N[t+1], P[t+1], a[t+1]
L, r[t], N[t], alpha[t+1], V[t+1] = self.smoothing_iteration(*args)
#last recursion for alpha and V at time 0
t = - 1
T, _, Z, _, _, _, _ = self.get_syst_matrices(list_3d, t+1, matrices.copy())
args = v[t+1], F[t+1], r[t+1], T, K[t+1], Z, N[t+1], P[t+1], a[t+1]
_, _, _, alpha[t+1], V[t+1] = self.smoothing_iteration(*args)
#flow if no missing observations
else:
#loop over the observations backwards
for t in range(len(a)-3, -1,-1):
T, _, Z, _, _, _, _ = self.get_syst_matrices(list_3d, t+1, matrices.copy())
args = v[t+1], F[t+1], r[t+1], T, K[t+1], Z, N[t+1], P[t+1], a[t+1]
L, r[t], N[t], alpha[t+1], V[t+1] = self.smoothing_iteration(*args)
#recursion at time 0 for alpha and V
t = - 1
T, _, Z, _, _, _, _ = self.get_syst_matrices(list_3d, t+1, matrices.copy())
args = v[t+1], F[t+1], r[t+1], T, K[t+1], Z, N[t+1], P[t+1], a[t+1]
_, _, _, alpha[t+1], V[t+1] = self.smoothing_iteration(*args)
#if the smoothed errors also need to be computed
if return_smoothed_errors:
            #compute smoothed errors
u, D, epsilon_hat, var_epsilon_cond, eta_hat, var_eta_cond = self.disturbance_smoothing_errors(v, F, K, r, N, matrices, list_3d)
return at, Pt, a, P, v, F, K, newC, newD, alpha, V, r, N, u, D, epsilon_hat, var_epsilon_cond, eta_hat, var_eta_cond
#only return smoother output
else:
return at, Pt, a, P, v, F, K, newC, newD, alpha, V, r, N
def smoother(self,y, filter_init, return_smoothed_errors=True):
"""
Wrapper around the smoother base function, to store results in a dict.
Parameters
----------
y : array-like
Observation data.
filter_init : tuple
Initialisation of the Kalman filter.
return_smoothed_errors : boolean, optional
Indicates whether the smoothed errors also should be computed.
The default is True.
Returns
-------
o : dict
Kalman smoother output.
e : dict
Smoothed error output.
"""
if return_smoothed_errors:
o = {}
e = {}
o["at"], o["Pt"], o["a"], o["P"], o["v"], o["F"], o["K"], o["newC"], o["newD"], o["alpha"], o["V"], o["r"], o["N"], e['u'], e['D'], e['epsilon_hat'], e['var_epsilon_cond'], e['eta_hat'], e['var_eta_cond'] = self.smoother_base(y, filter_init)
return {'output' : o, 'errors' : e}
else:
o = {}
o["at"], o["Pt"], o["a"], o["P"], o["v"], o["F"], o["K"], o["newC"], o["newD"], o["alpha"], o["V"], o["r"], o["N"] = self.smoother_base(y, filter_init, return_smoothed_errors=False)
return o
def kalman_filter_CI(self, y, filter_init, conf=0.9):
alpha_div2 = (1 - conf)/2
n = norm.ppf(1 - alpha_div2)
o = {}
o["at"], o["Pt"], o["a"], o["P"], o["v"], o["F"], o["K"], o["newC"], o["newD"] = self.kalman_filter_base(y, filter_init, self.matr)
o['CI_filtered_' + str(conf) + "_lower"] = o["at"] - n*np.linalg.cholesky(o["Pt"])
o['CI_filtered_' + str(conf) + "_upper"] = o["at"] + n*np.linalg.cholesky(o["Pt"])
return o
def kalman_smoother_CI(self, y, filter_init, conf=0.9, return_smoothed_errors=True):
alpha_div2 = (1 - conf)/2
n = norm.ppf(1 - alpha_div2)
if return_smoothed_errors:
o = {}
e = {}
o["at"], o["Pt"], o["a"], o["P"], o["v"], o["F"], o["K"], o["newC"], o["newD"], o["alpha"], o["V"], o["r"], o["N"], e['u'], e['D'], e['epsilon_hat'], e['var_epsilon_cond'], e['eta_hat'], e['var_eta_cond'] = self.smoother_base(y, filter_init)
else:
o = {}
o["at"], o["Pt"], o["a"], o["P"], o["v"], o["F"], o["K"], o["newC"], o["newD"], o["alpha"], o["V"], o["r"], o["N"] = self.smoother_base(y, filter_init, return_smoothed_errors)
o['CI_filtered_' + str(conf) + "_lower"] = o["at"] - n*np.linalg.cholesky(o["Pt"])
o['CI_filtered_' + str(conf) + "_upper"] = o["at"] + n*np.linalg.cholesky(o["Pt"])
o['CI_smoothed_' + str(conf) + "_lower"] = o["alpha"] - n*np.linalg.cholesky(o["V"])
o['CI_smoothed_' + str(conf) + "_upper"] = o["alpha"] + n*np.linalg.cholesky(o["V"])
try:
return {'output' : o, 'errors' : e}
except NameError:
return o
def observation_CI(self, y, filter_init, n, conf=[0.9], dist_fun_alpha1=None, **kwargs):
at, Pt, a, P, v, F, K, newC, newD, alpha, V, r, N = self.smoother_base(y, filter_init, return_smoothed_errors=False)
y_hat = (alpha* self.matr['Z'].transpose() + np.array(self.matr['c'].transpose())).transpose()
y_array = self.monte_carlo_y(y, filter_init, n, dist_fun_alpha1=dist_fun_alpha1, **kwargs)
ub = np.zeros((y.shape[0],y.shape[1], len(conf)))
lb = np.zeros((y.shape[0],y.shape[1], len(conf)))
i = 0
for c in conf:
lb[:,:,i] = np.percentile(y_array,100*(1 - c)/2, axis=1)
ub[:,:,i] = np.percentile(y_array,100*(1 + c)/2, axis=1)
i+=1
return y_hat, ub, lb
def kalman_llik(self, param, y, matr, param_loc, filter_init, covariance_loc = [],
diffuse = 0, llik_fun = llik_gaussian):
"""
Loglikelihood function for the Kalman filter system matrices.
        The function allows for specification of the elements in the system
        matrices which are optimised and which remain fixed. A time-varying
        system matrix needs to have the parameters which are to be estimated
        by the maximum likelihood estimator fixed for the whole period.
Parameters
----------
param : dict
Parameter values tried.
y : array-like
Observation data.
matr : dict
System matrices used in the evaluation of the likelihood function.
param_loc : dict
            Dictionary with the locations and matrices of the parameters to
be optimised in the maximum likelihood function.
filter_init : tuple
initialisation of the filter.
llik_fun : function, optional
Function used to compute the log likelihood.
The default is llik_gaussian.
diffuse : integer, optional
Diffuse initialisation of the likelihood function. The default is 0.
Returns
-------
llik_fun(v, F) : integer
Evaluation of the log likelihood by the given function.
"""
i = 0
#get the elements which are optimised in the ML function
for element in param_loc:
matr[element[0]][element[1],element[2]] = param[i]
i += 1
#make covariance as rho*sigx*sigy (the element in the optimisation is rho)
for element in covariance_loc:
sigxsigy = (matr[element[0]][element[1],element[1]]**(.5))*(matr[element[0]][element[2],element[2]]**(.5))
matr[element[0]][element[1],element[2]] = matr[element[0]][element[1],element[2]]*sigxsigy
        #make Q and H symmetric: build a triangular matrix (with zero diagonal),
        #transpose it and add it to the original matrix
for key in ['Q', 'H']:
#dummy where also diagonal is zero
dummy = matr[key].copy()
dummy[np.arange(dummy.shape[0])[:,None] <= np.arange(dummy.shape[1])] = 0
#ensure that the original is triangular
matr[key][np.arange(matr[key].shape[0])[:,None] < np.arange(matr[key].shape[1])] = 0
#necessary to handle 2D and 3D arrays respectively
if len(dummy.shape) > 2:
matr[key] = matr[key] + dummy.transpose((1,0,2)) #- np.diagonal(matr[key].diagonal(0,0,1).T)
else:
matr[key] = matr[key] + dummy.transpose((1,0)) #- np.diagonal(matr[key].diagonal(0,0,1).T)
#apply Kalman Filter
_, _, _, _, v, F, _, _, _ = self.kalman_filter_base(y, filter_init,
matr)
#first element not used in diffuse likelihood
v = v[diffuse:,:]
F = F[diffuse:,:,:]
return llik_fun(v, F)
def kalman_llik_diffuse(self, param, y, matr, param_loc, filter_init, covariance_loc = [],
llik_fun = llik_gaussian):
"""
Wrapper around the kalman_llik_base function where diffuse is set to 1.
Parameters
----------
param : dict
Parameter values tried.
y : array-like
Observation data.
matr : dict
System matrices used in the evaluation of the likelihood function.
param_loc : dict
            Dictionary with the locations and matrices of the parameters to
be optimised in the maximum likelihood function.
filter_init : tuple
initialisation of the filter.
llik_fun : function, optional
Function used to compute the log likelihood.
The default is llik_gaussian.
Returns
-------
self.kalman_llik_base( param, y, matr, param_loc, filter_init,
llik_gaussian, diffuse = 1) : integer
Evaluation of the log likelihood by the given function.
"""
return self.kalman_llik(param, y, matr, param_loc, filter_init, covariance_loc,
diffuse = 1, llik_fun=llik_fun)
def fit(self, y, fit_method= ml_estimator_matrix,
matrix_order = ['T','Z','R','Q','H','c','d'], **fit_kwargs):
"""
Fit function for estimating the system matrices based on the observations
given. The function collects the parameters which are to be estimated
by looking for np.nan values in the system matrices.
Parameters
----------
y : array-like
Observation data.
fit_method : function, optional
Function for the estimation of the parameters.
The default is ml_estimator_matrix.
matrix_order : list, optional
order of the system matrices. The default is ['T','Z','R','Q','H','c','d'].
**fit_kwargs : dict
additional arguments necessary for running the fit function.
Returns
-------
self : state_spacer object
object where the parameters of the state matrices are estimated.
"""
#make a dict which contains all parameter locations in the system
#matrices which need to be estimated
param_loc = []
covariance_loc = [] #also keep which covariances are being estimated
        #go through the system matrices
for key in (matrix_order):
#get the elements which are np.nan
nan_location = np.argwhere(np.isnan(self.matr[key]))[:,[0,1]]
if len(nan_location):
nan_location = np.unique(np.argwhere(np.isnan(self.matr[key]))[:,[0,1]],axis=0)
#add the matrix, as well as the location in the matrix to the dict
#with locations
for loc in nan_location:
#if nan in Q or H specified in upper part: bring it to lower part
if (key in ['Q','H']) & (loc[0] > loc[1]):
covariance_loc.append((key, loc[0], loc[1]))
if (key in ['Q','H']) & (loc[0] < loc[1]):
param_loc.append((key, loc[1], loc[0]))
covariance_loc.append((key, loc[1], loc[0])) #turned around (because matrices are made triangular)
else:
param_loc.append((key, loc[0], loc[1]))
param_loc = list(dict.fromkeys(param_loc))
#set zeros on upper part of Q and H (in the llik function, both are mirrored)
for key in ['Q','H']:
self.matr[key] = np.tril(self.matr[key])
fit_kwargs['covariance_loc'] = covariance_loc
#apply the fit method to the system matrices
res = fit_method(y, self.matr, param_loc, **fit_kwargs)
#get the results of the optimisation
param = res.x
i = 0
#get the elements which are optimised in the fit function
for element in param_loc:
self.matr[element[0]][element[1],element[2]] = param[i]
i += 1
for element in covariance_loc:
sigxsigy = (self.matr[element[0]][element[1],element[1]]**(.5))*(self.matr[element[0]][element[2],element[2]]**(.5))
self.matr[element[0]][element[1],element[2]] = self.matr[element[0]][element[1],element[2]]*sigxsigy
        #make Q and H symmetric: build a triangular matrix (with zero diagonal),
        #transpose it and add it to the original matrix
for key in ['Q', 'H']:
#dummy where also diagonal is zero
dummy = self.matr[key].copy()
dummy[np.arange(dummy.shape[0])[:,None] <= np.arange(dummy.shape[1])] = 0
#ensure that the original is triangular
self.matr[key][np.arange(self.matr[key].shape[0])[:,None] < np.arange(self.matr[key].shape[1])] = 0
#necessary to handle 2D and 3D arrays respectively
if len(dummy.shape) > 2:
self.matr[key] = self.matr[key] + dummy.transpose((1,0,2)) #- np.diagonal(matr[key].diagonal(0,0,1).T)
else:
self.matr[key] = self.matr[key] + dummy.transpose((1,0)) #- np.diagonal(matr[key].diagonal(0,0,1).T)
#set boolean showing if the model is fitted to true
self.fitted = True
#store information about the fit procedure in the object
self.fit_parameters["fit_method"] = fit_method
self.fit_parameters["matrix_order"] = matrix_order
for kwarg in fit_kwargs.keys():
self.fit_parameters[str(kwarg)] = fit_kwargs[kwarg]
self.fit_parameters["param_loc"] = param_loc
self.fit_results = res
#return object
return self
def forecast_state(self, forecast_init, forecast_horizon):
y = np.zeros((forecast_horizon, self.matr['Z'].shape[0]))
y[:,:] = np.nan
at, Pt, a, P, _, _, _, _, _ = self.kalman_filter_base(y, forecast_init, self.matr)
return at, Pt, a, P
def forecast(self,forecast_init, forecast_horizon,n=1000,conf=[0.9],
dist_fun_alpha1=None, **kwargs):
y = np.zeros((forecast_horizon, self.matr['Z'].shape[0]))
y[:,:] = np.nan
at, Pt, a, P, _, _, _, _, _ = self.kalman_filter_base(y, forecast_init, self.matr)
y_hat, lb, ub = self.observation_CI(y, forecast_init, n, conf=conf,
dist_fun_alpha1=dist_fun_alpha1, **kwargs)
return y_hat, lb, ub
def disturbance_smoothing_errors_iteration(self, H, Q, R, v, F, K, r, N):
"""
Computation of the smoothing errors
Parameters
----------
H : np.matrix
System matrix Ht.
Q : np.matrix
System matrix Qt.
R : np.matrix
System matrix Rt.
v : array-like
Prediction error.
F : array-like
Prediction error variance.
K : array-like
K (Kalman filter output).
r : array-like
r (Kalman smoothing output).
N : array-like
N (Kalman smoothing output).
Returns
-------
u : array-like
u (intermediary result) of time t.
D : array-like
D (intermediary result) of time t.
epsilon_hat : array-like
Estimated observation error of time t.
var_epsilon_cond : array-like
Estimated observation error variance of time t.
eta_hat : array-like
Estimated state error of time t.
var_eta_cond : array-like
Estimated state error variance of time t.
"""
#convert arrays to matrices
v = np.matrix(v)
F = np.matrix(F)
K = np.matrix(K)
r = np.matrix(r)
N = np.matrix(N)
# calculate u = v_t*F_t^-1 - K_t*r_t
u = v * np.linalg.inv(F) - r * K
# calculate D_t = F_t^-1 + K_t* N_t*K_t
D = np.linalg.inv(F) + np.transpose(K) * N * K
# estimated epsilon_t= H * u_t
epsilon_hat = u * np.transpose(H)
# estimated conditional variance_t epsilon = H - H*D_t *H
var_epsilon_cond = H - H* D * H
# estimated eta_t= Q*R' * r_t
eta_hat = r * R * np.transpose(Q)
# estimated conditional variance_t eta = Q - Q*R'* N_t *R*Q
var_eta_cond = Q - Q * np.transpose(R) * N * R * Q
return u, D, epsilon_hat, var_epsilon_cond, eta_hat, var_eta_cond
def disturbance_smoothing_errors(self, v, F, K, r, N, matrices, list_3d):
"""
        Function regulating the flow of computing the smoothing errors
Parameters
----------
v : array-like
Prediction error.
F : array-like
Prediction error variance.
K : array-like
K (Kalman filter output).
r : array-like
r (Kalman smoothing output).
N : array-like
N (Kalman smoothing output).
matrices : dict
System matrices.
list_3d : list
List of 3D matrices.
Returns
-------
u : array-like
u (intermediary result).
D : array-like
D (intermediary result).
epsilon_hat : array-like
Estimated observation error.
var_epsilon_cond : array-like
Estimated observation error variance.
eta_hat : array-like
Estimated state error.
var_eta_cond : array-like
Estimated state error variance.
"""
#get the first system matrices for setting the array dimensions
_, _, _, Q, H, _, _ = self.get_syst_matrices(list_3d, 0, matrices.copy())
#get the length of the series
time = len(v)
#initialisation of the arrays
u = np.zeros((time, (v).shape[1], (np.linalg.inv(F)).shape[1]))
D = np.zeros((time, np.linalg.inv(F).shape[1], np.linalg.inv(F).shape[1]))
epsilon_hat = np.zeros((time, v.shape[1], H.shape[1]))
        var_epsilon_cond = np.zeros((time, H.shape[0], H.shape[1]))
import numpy
class CreateSoftClipper:
"""Creating a limiter-kind audio-effect class/device
Its a wave-shaper and messes with dynamic range, but doesn't introduce latency.
Parameters
----------
drive : float
A value between 0.0 and 1.0, 0.0 meaning no wave shaping at all and 1.0 full drive.
Notes
-----
* You can go beyond 1.0, but I designed it to be at the sweet spot. Go to 70.0 if you want, but be warned.
"""
def __init__(self,drive = 0.44):
self.placeholder = True
self.drive = drive + 1
def apply(self, float_array_input):
"""Applying the Soft Clipper to a numpy-array
Parameters
----------
        float_array_input : numpy.ndarray
            The array which the effect should be applied on.
Returns
-------
        numpy.ndarray
            The processed array; it should be the exact same size as the input array.
"""
remember_negative = numpy.where(float_array_input<0,True,False)
float_array_input = numpy.abs(float_array_input)
        float_array_input = numpy.clip(float_array_input, -1.0, 1.0)
# coding: utf-8
# In[1]:
import numpy as np
import pickle
from itertools import chain
from collections import OrderedDict
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
import matplotlib.pylab as plt
from copy import deepcopy
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
import sys, os
sys.path.append(os.path.join(os.path.dirname("__file__"), '..', '..'))
from mela.util import plot_matrices, make_dir, get_struct_str, get_args, Early_Stopping, record_data, manifold_embedding
from mela.settings.filepath import variational_model_PATH, dataset_PATH
from mela.pytorch.net import Net
from mela.pytorch.util_pytorch import Loss_with_uncertainty
from mela.variational.util_variational import get_torch_tasks
from mela.variational.variational_meta_learning import Master_Model, Statistics_Net, Generative_Net, load_model_dict, get_regulated_statistics
from mela.variational.variational_meta_learning import VAE_Loss, sample_Gaussian, clone_net, get_nets, get_tasks, evaluate, get_reg, load_trained_models
from mela.variational.variational_meta_learning import plot_task_ensembles, plot_individual_tasks, plot_statistics_vs_z, plot_data_record, get_corrcoef
from mela.variational.variational_meta_learning import plot_few_shot_loss, plot_individual_tasks_bounce, plot_quick_learn_performance
from mela.variational.variational_meta_learning import get_latent_model_data, get_polynomial_class, get_Legendre_class, get_master_function
seed = 1
np.random.seed(seed)
torch.manual_seed(seed)
is_cuda = torch.cuda.is_available()
# ## Training:
# In[ ]:
task_id_list = [
# "latent-linear",
# "polynomial-3",
# "Legendre-3",
# "M-sawtooth",
# "M-sin",
# "M-Gaussian",
# "M-tanh",
# "M-softplus",
# "C-sin",
"C-tanh",
# "bounce-states",
# "bounce-images",
]
num_shots = 10
exp_id = "C-May8"
exp_mode = "meta"
input_size = 1
is_VAE = False
is_uncertainty_net = False
is_regulated_net = False
is_load_data = False
VAE_beta = 0.2
if task_id_list[0] == "C-sin":
statistics_output_neurons = 2
elif task_id_list[0] == "C-tanh":
statistics_output_neurons = 4
elif task_id_list[0] in ["bounce-states", "bounce-images"]:
statistics_output_neurons = 8
output_size = 1
lr = 5e-5
num_train_tasks = 50
num_test_tasks = 50
batch_size_task = min(50, num_train_tasks)
num_backwards = 1
num_iter = 10000
pre_pooling_neurons = 200
num_context_neurons = 0
statistics_pooling = "max"
main_hidden_neurons = (40, 40)
patience = 200
reg_amp = 1e-6
activation_gen = "leakyRelu"
activation_model = "leakyRelu"
optim_mode = "indi"
loss_core = "huber"
array_id = "new"
exp_id = get_args(exp_id, 1)
exp_mode = get_args(exp_mode, 2)
task_id_list = get_args(task_id_list, 3, type = "tuple")
statistics_output_neurons = get_args(statistics_output_neurons, 4, type = "int")
is_VAE = get_args(is_VAE, 5, type = "bool")
VAE_beta = get_args(VAE_beta, 6, type = "float")
lr = get_args(lr, 7, type = "float")
batch_size_task = get_args(batch_size_task, 8, type = "int")
pre_pooling_neurons = get_args(pre_pooling_neurons, 9, type = "int")
num_context_neurons = get_args(num_context_neurons, 10, type = "int")
statistics_pooling = get_args(statistics_pooling, 11)
main_hidden_neurons = get_args(main_hidden_neurons, 12, "tuple")
reg_amp = get_args(reg_amp, 13, type = "float")
activation_gen = get_args(activation_gen, 14)
activation_model = get_args(activation_model, 15)
optim_mode = get_args(optim_mode, 16)
is_uncertainty_net = get_args(is_uncertainty_net, 17, "bool")
loss_core = get_args(loss_core, 18)
array_id = get_args(array_id, 19)
try:
get_ipython().run_line_magic('matplotlib', 'inline')
isplot = True
except:
isplot = False
# Settings:
reg_dict = {"statistics_Net": {"weight": reg_amp, "bias": reg_amp},
"generative_Net": {"weight": reg_amp, "bias": reg_amp, "W_gen": reg_amp, "b_gen": reg_amp}}
task_settings = {
"xlim": (-5, 5),
"num_examples": 20,
"test_size": 0.5,
}
struct_param_pre = [
[60, "Simple_Layer", {}],
# [60, "Simple_Layer", {}],
[60, "Simple_Layer", {}],
[pre_pooling_neurons, "Simple_Layer", {"activation": "linear"}],
]
struct_param_post = None
struct_param_gen_base = [
[60, "Simple_Layer", {}],
# [60, "Simple_Layer", {}],
[60, "Simple_Layer", {}],
]
isParallel = False
inspect_interval = 50
save_interval = 100
filename = variational_model_PATH + "/trained_models/{0}/Net_{1}_{2}_input_{3}_({4},{5})_stat_{6}_pre_{7}_pool_{8}_context_{9}_hid_{10}_batch_{11}_back_{12}_VAE_{13}_{14}_uncer_{15}_lr_{16}_reg_{17}_actgen_{18}_actmodel_{19}_struct_{20}_{21}_core_{22}_{23}_".format(
exp_id, exp_mode, task_id_list, input_size, num_train_tasks, num_test_tasks, statistics_output_neurons, pre_pooling_neurons, statistics_pooling, num_context_neurons, main_hidden_neurons, batch_size_task, num_backwards, is_VAE, VAE_beta, is_uncertainty_net, lr, reg_amp, activation_gen, activation_model, get_struct_str(struct_param_gen_base), optim_mode, loss_core, exp_id)
make_dir(filename)
print(filename)
# Obtain tasks:
assert len(task_id_list) == 1
dataset_filename = dataset_PATH + task_id_list[0] + "_{0}-shot.p".format(num_shots)
tasks = pickle.load(open(dataset_filename, "rb"))
tasks_train = get_torch_tasks(tasks["tasks_train"], task_id_list[0], is_cuda = is_cuda)
tasks_test = get_torch_tasks(tasks["tasks_test"], task_id_list[0], num_tasks = num_test_tasks, is_cuda = is_cuda)
# Obtain nets:
statistics_Net, generative_Net, generative_Net_logstd = get_nets(input_size = input_size, output_size = output_size, main_hidden_neurons = main_hidden_neurons,
pre_pooling_neurons = pre_pooling_neurons, statistics_output_neurons = statistics_output_neurons, num_context_neurons = num_context_neurons,
struct_param_pre = struct_param_pre,
struct_param_gen_base = struct_param_gen_base,
activation_statistics = activation_gen,
activation_generative = activation_gen,
activation_model = activation_model,
statistics_pooling = statistics_pooling,
isParallel = isParallel,
is_VAE = is_VAE,
is_uncertainty_net = is_uncertainty_net,
is_cuda = is_cuda,
)
if is_regulated_net:
struct_param_regulated_Net = [
[40, "Simple_Layer", {}],
[40, "Simple_Layer", {}],
[1, "Simple_Layer", {"activation": "linear"}],
]
generative_Net = Net(input_size = input_size, struct_param = struct_param_regulated_Net, settings = {"activation": activation_model})
master_model = Master_Model(statistics_Net, generative_Net, generative_Net_logstd, is_cuda = is_cuda)
# Setting up optimizer and loss functions:
if is_uncertainty_net:
optimizer = optim.Adam(chain.from_iterable([statistics_Net.parameters(), generative_Net.parameters(), generative_Net_logstd.parameters()]), lr = lr)
else:
optimizer = optim.Adam(chain.from_iterable([statistics_Net.parameters(), generative_Net.parameters()]), lr = lr)
if loss_core == "mse":
loss_fun_core = nn.MSELoss(size_average = True)
elif loss_core == "huber":
loss_fun_core = nn.SmoothL1Loss(size_average = True)
else:
raise Exception("loss_core {0} not recognized!".format(loss_core))
if is_VAE:
criterion = VAE_Loss(criterion = loss_fun_core, prior = "Gaussian", beta = VAE_beta)
else:
if is_uncertainty_net:
criterion = Loss_with_uncertainty(core = loss_core)
else:
criterion = loss_fun_core
early_stopping = Early_Stopping(patience = patience)
# Setting up recordings:
all_keys = list(tasks_train.keys()) + list(tasks_test.keys())
data_record = {"loss": {key: [] for key in all_keys}, "loss_sampled": {key: [] for key in all_keys}, "mse": {key: [] for key in all_keys},
"reg": {key: [] for key in all_keys}, "KLD": {key: [] for key in all_keys}}
info_dict = {"array_id": array_id}
info_dict["data_record"] = data_record
info_dict["model_dict"] = []
record_data(data_record, [exp_id, tasks_train, tasks_test, task_id_list, task_settings, reg_dict, is_uncertainty_net, lr, pre_pooling_neurons, num_backwards, batch_size_task,
struct_param_gen_base, struct_param_pre, struct_param_post, statistics_pooling, activation_gen, activation_model],
["exp_id", "tasks_train", "tasks_test", "task_id_list", "task_settings", "reg_dict", "is_uncertainty_net", "lr", "pre_pooling_neurons", "num_backwards", "batch_size_task",
"struct_param_gen_base", "struct_param_pre", "struct_param_post", "statistics_pooling", "activation_gen", "activation_model"])
# Training:
for i in range(num_iter + 1):
chosen_task_keys = np.random.choice(list(tasks_train.keys()), batch_size_task, replace = False).tolist()
if optim_mode == "indi":
if is_VAE:
KLD_total = Variable(torch.FloatTensor([0]), requires_grad = False)
if is_cuda:
KLD_total = KLD_total.cuda()
for task_key, task in tasks_train.items():
if task_key not in chosen_task_keys:
continue
((X_train, y_train), (X_test, y_test)), _ = task
for k in range(num_backwards):
optimizer.zero_grad()
if is_VAE:
statistics_mu, statistics_logvar = statistics_Net(torch.cat([X_train, y_train], 1))
statistics = sample_Gaussian(statistics_mu, statistics_logvar)
if is_regulated_net:
statistics = get_regulated_statistics(generative_Net, statistics)
y_pred = generative_Net(X_test, statistics)
loss, KLD = criterion(y_pred, y_test, mu = statistics_mu, logvar = statistics_logvar)
KLD_total = KLD_total + KLD
else:
if is_uncertainty_net:
statistics_mu, statistics_logvar = statistics_Net(torch.cat([X_train, y_train], 1))
y_pred = generative_Net(X_test, statistics_mu)
y_pred_logstd = generative_Net_logstd(X_test, statistics_logvar)
loss = criterion(y_pred, y_test, log_std = y_pred_logstd)
else:
statistics = statistics_Net(torch.cat([X_train, y_train], 1))
if is_regulated_net:
statistics = get_regulated_statistics(generative_Net, statistics)
y_pred = generative_Net(X_test, statistics)
loss = criterion(y_pred, y_test)
reg = get_reg(reg_dict, statistics_Net = statistics_Net, generative_Net = generative_Net, is_cuda = is_cuda)
loss = loss + reg
loss.backward(retain_graph = True)
optimizer.step()
# Perform gradient on the KL-divergence:
if is_VAE:
KLD_total = KLD_total / batch_size_task
optimizer.zero_grad()
KLD_total.backward()
optimizer.step()
record_data(data_record, [KLD_total], ["KLD_total"])
elif optim_mode == "sum":
optimizer.zero_grad()
loss_total = Variable(torch.FloatTensor([0]), requires_grad = False)
if is_cuda:
loss_total = loss_total.cuda()
for task_key, task in tasks_train.items():
if task_key not in chosen_task_keys:
continue
((X_train, y_train), (X_test, y_test)), _ = task
if is_VAE:
statistics_mu, statistics_logvar = statistics_Net(torch.cat([X_train, y_train], 1))
statistics = sample_Gaussian(statistics_mu, statistics_logvar)
y_pred = generative_Net(X_test, statistics)
loss, KLD = criterion(y_pred, y_test, mu = statistics_mu, logvar = statistics_logvar)
loss = loss + KLD
else:
if is_uncertainty_net:
statistics_mu, statistics_logvar = statistics_Net(torch.cat([X_train, y_train], 1))
y_pred = generative_Net(X_test, statistics_mu)
y_pred_logstd = generative_Net_logstd(X_test, statistics_logvar)
loss = criterion(y_pred, y_test, log_std = y_pred_logstd)
else:
statistics = statistics_Net(torch.cat([X_train, y_train], 1))
y_pred = generative_Net(X_test, statistics)
loss = criterion(y_pred, y_test)
reg = get_reg(reg_dict, statistics_Net = statistics_Net, generative_Net = generative_Net, is_cuda = is_cuda)
loss_total = loss_total + loss + reg
loss_total.backward()
optimizer.step()
else:
raise Exception("optim_mode {0} not recognized!".format(optim_mode))
loss_test_record = []
for task_key, task in tasks_test.items():
loss_test, _, _, _ = evaluate(task, statistics_Net, generative_Net, generative_Net_logstd = generative_Net_logstd, criterion = criterion, is_VAE = is_VAE, is_regulated_net = is_regulated_net)
loss_test_record.append(loss_test)
to_stop = early_stopping.monitor(np.mean(loss_test_record))
# Validation and visualization:
if i % inspect_interval == 0 or to_stop:
print("=" * 50)
print("training tasks:")
for task_key, task in tasks_train.items():
loss_test, loss_test_sampled, mse, KLD_test = evaluate(task, statistics_Net, generative_Net, generative_Net_logstd = generative_Net_logstd, criterion = criterion, is_VAE = is_VAE, is_regulated_net = is_regulated_net)
reg = get_reg(reg_dict, statistics_Net = statistics_Net, generative_Net = generative_Net, is_cuda = is_cuda).data[0]
data_record["loss"][task_key].append(loss_test)
data_record["loss_sampled"][task_key].append(loss_test_sampled)
data_record["mse"][task_key].append(mse)
data_record["reg"][task_key].append(reg)
data_record["KLD"][task_key].append(KLD_test)
print('{0}\ttrain\t{1} \tloss: {2:.5f}\tloss_sampled:{3:.5f} \tmse:{4:.5f}\tKLD:{5:.6f}\treg:{6:.6f}'.format(i, task_key, loss_test, loss_test_sampled, mse, KLD_test, reg))
print("test tasks:")
for task_key, task in tasks_test.items():
loss_test, loss_test_sampled, mse, KLD_test = evaluate(task, statistics_Net, generative_Net, generative_Net_logstd = generative_Net_logstd, criterion = criterion, is_VAE = is_VAE, is_regulated_net = is_regulated_net)
reg = get_reg(reg_dict, statistics_Net = statistics_Net, generative_Net = generative_Net, is_cuda = is_cuda).data[0]
data_record["loss"][task_key].append(loss_test)
data_record["loss_sampled"][task_key].append(loss_test_sampled)
data_record["mse"][task_key].append(mse)
data_record["reg"][task_key].append(reg)
data_record["KLD"][task_key].append(KLD_test)
print('{0}\ttest\t{1} \tloss: {2:.5f}\tloss_sampled:{3:.5f} \tmse:{4:.5f}\tKLD:{5:.6f}\treg:{6:.6f}'.format(i, task_key, loss_test, loss_test_sampled, mse, KLD_test, reg))
loss_train_list = [data_record["loss"][task_key][-1] for task_key in tasks_train]
loss_test_list = [data_record["loss"][task_key][-1] for task_key in tasks_test]
loss_train_sampled_list = [data_record["loss_sampled"][task_key][-1] for task_key in tasks_train]
loss_test_sampled_list = [data_record["loss_sampled"][task_key][-1] for task_key in tasks_test]
mse_train_list = [data_record["mse"][task_key][-1] for task_key in tasks_train]
mse_test_list = [data_record["mse"][task_key][-1] for task_key in tasks_test]
reg_train_list = [data_record["reg"][task_key][-1] for task_key in tasks_train]
reg_test_list = [data_record["reg"][task_key][-1] for task_key in tasks_test]
mse_few_shot = plot_few_shot_loss(master_model, tasks_test, isplot = isplot)
plot_quick_learn_performance(master_model, tasks_test)
record_data(data_record,
[np.mean(loss_train_list), np.median(loss_train_list), np.mean(reg_train_list), i,
np.mean(loss_test_list), np.median(loss_test_list), np.mean(reg_test_list),
np.mean(loss_train_sampled_list), np.median(loss_train_sampled_list),
np.mean(loss_test_sampled_list), np.median(loss_test_sampled_list),
# -*- coding: utf-8 -*-
"""
Here we gather everything related to the representation of the decision
variables as well as the population (Punto, Poblacion, mutation functions,
crossover functions, selection functions)
"""
import numpy as np
from Estadisticas import Estadisticas
class Punto(np.ndarray):
'''Inherits from np.ndarray; represents a solution.
Mutation is handled at the point level.
We always treat the point itself as the genotype.
'''
def __new__(cls, dimensiones, initValue = None, rango = None, \
operadores = None, crowded_distance = None, generacion = 1, dist_fenotipo = None, **kwargs):
'''To inherit from np.ndarray we must use __new__ instead of __init__'''
obj = np.ndarray.__new__(cls, dimensiones, **kwargs)
obj.gen = generacion
obj.vals = None
obj.rest = None
obj.rgo = rango
obj.crwd = crowded_distance
obj.np = 0
obj.Sp = []
'''operadores is a dictionary of evolutionary operators'''
if not operadores is None:
Punto._mutar = operadores['mutador']
Punto._fenotipo = operadores['fenotipo']
if not dist_fenotipo is None:
Punto.dist_fenotipo = dist_fenotipo
obj.setPunto(vector = initValue)
return obj
def setPunto(self, vector = None):
if vector is None:
self[:] = 0
else:
for i in range(len(self)):
self[i] = vector[i]
def copy(self, **kwargs):
'''Return another point that is a copy of the current one'''
p = Punto(dimensiones = len(self), **kwargs)
p.gen = self.gen
p.vals = self.vals
p.rest = self.rest
p.rgo = self.rgo
p.crwd = self.crwd
p.np = self.np
p.Sp = self.Sp[:]
p.setPunto(vector = self)
return p
def fenotipo(self):
'''For now we work with real-valued representation: phenotype = genotype'''
return self.__class__._fenotipo(self)
def rand(self, problema):
if problema.parametros.get('tipo_var', 'real') == 'real':
self[:] = (problema.lims[:, 1] - problema.lims[:, 0]) * np.random.rand(problema.dims) + problema.lims[:, 0]
else:
for i in range(problema.dims):
self[i] = np.random.choice(problema.lims[i])
def evaluado_en(self, problema):
'''Evaluate the point with the functions provided by the problem'''
if self.vals is None:
self.vals = problema.evaluador(self)
return self.vals
def violacion_restricciones(self, problema):
'''Compute the degree of constraint violation'''
if self.rest is None:
self.rest = problema.violacion_restricciones(self)
return self.rest
def mutar(self, problema):
'''This call asks the point to mutate itself'''
self.__class__._mutar(self, problema)
class Poblacion(list):
'''The population is a list of Puntos representing the solutions.
Crossover and selection are defined at the population level.'''
def __init__(self, size, operadores, generacion = 0, stats = None):
self.size = size
self.gen = generacion
if stats is None:
self.stats = Estadisticas('Estadisticas')
else:
self.stats = stats
self.stats.nuevo_Contador('gens') # Current generation
if not operadores is None:
self.__class__._selector = operadores['selector']
self.__class__._cruzador = operadores['cruzador']
self.__class__._seleccionador = operadores['seleccionador']
def select_with(self, nomCaracteristica, valor):
'''Select the points whose given attribute matches a value'''
resultado = []
for p in self:
if p.__getattribute__(nomCaracteristica) == valor:
resultado.append(p)
return resultado
def selector(self, problema):
'''Select parents for crossover'''
return self.__class__._selector(self, problema)
def cruzador(self, padre, madre, problema):
'''Cross two points'''
return self.__class__._cruzador(padre, madre, problema)
def seleccionador(self, subpoblacion, problema):
'''Select from the population and drop those that are no longer useful'''
return self.__class__._seleccionador(self, subpoblacion, problema)
def union(self, pop):
for p in pop:
self.append(p)
def borrar(self, conjunto):
for p in conjunto:
if p in self:
self.remove(p)
def fast_non_dominated_sort(self, problema):
'''We follow the algorithm described in "A fast and elitist multiobjective GA: NSGA-II"'''
#TODO: This procedure could be improved by not having to compute the rank of the whole population
frentes = [[]]
for p in self:
p.Sp, p.np = [], 0
for q in self:
dominio = problema.dominadoC(p, q)
if dominio == 1: # p dominates q
p.Sp.append(q)
elif dominio == -1: # q dominates p
p.np += 1
if p.np == 0:
p.rgo = 1
frentes[0].append(p)
i = 0
while True:
siguienteFrente = []
for p in frentes[i]:
for q in p.Sp:
q.np -= 1
if q.np == 0:
q.rgo = i + 2
siguienteFrente.append(q)
if siguienteFrente == []:
break
frentes.append(siguienteFrente[:])
i += 1
def __contains__(self, item):
for p in self:
if p is item:
return True
return False
def crowding_distance_assignment(I, problema):
'''We follow the algorithm described in "A fast and elitist multiobjective GA: NSGA-II"'''
I.sort(reverse = True, key = lambda x: x[0])
extremos = [I[0], I[-1]]
for p in I:
p.crwd = 0
for p in extremos:
p.crwd = float('inf')
#TODO: I have not found a way to do this with numpy
objetivos = []
for p in I:
parcial = [p]
parcial.extend(p.evaluado_en(problema))
objetivos.append(parcial[:])
# objetivos[i] = [p_i, f1(p_i), f2(p_i), ..., fn(p_i)]
for i in range(1, len(problema.objetivos) + 1):
objetivos.sort(key=lambda x: x[i])
fmax = max(objetivos, key=lambda x: x[i])[i]
fmin = min(objetivos, key=lambda x: x[i])[i]
for j in range(1, len(objetivos) - 1):
objetivos[j][0].crwd += (objetivos[j+1][i] - objetivos[j-1][i]) / (fmax - fmin)
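# Sketch (not in the original file): the crowded-comparison operator that
# typically consumes the rgo and crwd values computed above in NSGA-II:
# lower rank wins, ties broken by larger crowding distance.
def crowded_compare(p, q):
    '''True if p should be preferred over q.'''
    return (p.rgo < q.rgo) or (p.rgo == q.rgo and p.crwd > q.crwd)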
############################################
# PHENOTYPES
# They always have the same signature:
# def name(punto)
# They return
# the phenotype corresponding to the point
############################################
def real(punto):
'''Real-valued representation: phenotype = genotype'''
return punto
def binario(punto):
'''Binary representation'''
fenotipo = []
for i in range(len(punto.dist_fenotipo)):
li = np.sum(punto.dist_fenotipo[:i])
ui = np.sum(punto.dist_fenotipo[:i + 1])
fenotipo.append(punto[li:ui])
return fenotipo
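# Worked example (illustrative): with Punto.dist_fenotipo = [3, 2], a 5-gene
# point is split into fenotipo = [punto[0:3], punto[3:5]].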
############################################
# MUTATION OPERATORS
# They always have the same signature:
# def name(punto, problema)
############################################
def mutador1(punto, problema):
'''Mutate each component with probability inversely proportional
to the dimension of the space; a mutated component may take any value in its range'''
p = problema.parametros.get('pm', 1 / problema.dims)
mascara = np.random.rand(problema.dims) < p
punto[mascara] = (problema.lims[mascara, 1] - problema.lims[mascara, 0]) \
* np.random.rand(mascara.sum()) + problema.lims[mascara, 0]
def mutadorb(punto, problema):
'''State mutator for discrete variables: a component is chosen
and forced to change to one of the other states'''
p = problema.parametros.get('pm', 1 / problema.dims)
mascara = np.random.rand(problema.dims) < p
for i in range(len(problema.lims)):
if not mascara[i]:
continue
nvalor = np.random.choice(problema.lims[i])
while nvalor == punto[i]:
nvalor = np.random.choice(problema.lims[i])
punto[i] = nvalor
def mutador_init(punto, problema):
'''Pick an arbitrary point from the decision space'''
punto.rand(problema)
def mutacion_aleatoria(punto, problema):
'''Each component is varied uniformly within the maximum range it is allowed'''
copy_lims = problema.lims.copy()
copy_lims[:,0] = np.abs(copy_lims[:,0] - punto)
copy_lims[:,1] = np.abs(copy_lims[:,1] - punto)
deltas = np.min(copy_lims, axis = 1) # maximum allowed variability per component
u = np.random.rand(problema.dims)
import numpy as np
import rbmpy.rbm as rbm
import logging, math
from rbmpy.sampler import VanillaSampler
from rbmpy.progress import Progress
from scipy.special import expit
class VanillaTrainier(object):
"""Trainer that can knows how to update an RBM weights and hidden/visible states, requires a `Sampler`.
Args:
rbm (rbm.RBM): The RBM we are training.
sampler (sampler.Sampler): The sampler used to generate the reconstructions for the RBM's training.
Attributes:
rbm (rbm.RBM): The rbm this instance is training.
sampler (sampler.Sampler): The sampler for generating reconstructions for the RBM's training.
"""
def __init__(self, rbm, sampler):
self.rbm = rbm
self.sampler = sampler
self.progess_logger = None
def batch_train(self, epochs_per_batch, training, batches, learning_rate, use_visible_bias):
logger = Progress("Batch Logger", batches)
logger.set_percentage_update_frequency(10)
batch_size = math.floor(training.shape[0] / batches)
for batch in range(batches):
self.train(epochs_per_batch, training[(batch * batch_size):((batch + 1) * batch_size),:], learning_rate, use_visible_bias = use_visible_bias)
logger.set_completed_units(batch)
self.rbm.visible = training
def train(self, epochs, training ,learning_rate = 0.002, logging_freq = None, use_visible_bias = True):
"""
Train the rbm provided in the init to fit the given data.
Args:
epochs (int): The number of times to go over the training set, assumes this number is at least equal to the training set size.
training (numpy.array): The training set. The shape should match the RBM that the trainer was supplied.
learning_rate (Optional(float)): RBM's learning_rate, used in hebbian learning.
"""
if not use_visible_bias:
self.rbm.visible_bias = np.zeros(self.rbm.visible_bias.shape)
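# Usage sketch (not part of the original file; the RBM constructor signature
# is an assumption, consult rbmpy.rbm for the real API):
# model = rbm.RBM(n_visible=784, n_hidden=64)
# trainer = VanillaTrainier(model, VanillaSampler(model))
# trainer.train(epochs=10, training=data, learning_rate=0.002)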
"""implementation of imagenet dataset
"""
# pylint: disable=unused-argument,missing-docstring
import json
import logging
import os
import time
import numpy as np
import tensorflow as tf
import dataloader
import ssd_constants
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("coco")
class Dataset(object):
def __init__(self):
self.arrival = None
self.image_list = []
self.label_list = []
self.image_list_inmemory = []
self.source_id_list_inmemory = []
self.raw_shape_list_inmemory = []
self.image_map = {}
self.last_loaded = -1
def preprocess(self, use_cache=True):
raise NotImplementedError("Dataset:preprocess")
def get_item_count(self):
return len(self.image_list)
def get_list(self):
raise NotImplementedError("Dataset:get_list")
def load_query_samples(self, sample_list):
self.image_list_inmemory = []
self.image_map = {}
for i, sample in enumerate(sample_list):
self.image_map[sample] = i
img, _ = self.get_item(sample)
self.image_list_inmemory.append(img)
self.last_loaded = time.time()
def unload_query_samples(self, sample_list):
if sample_list:
for sample in sample_list:
del self.image_map[sample]
else:
self.image_map = {}
self.image_list_inmemory = []
def get_samples(self, item_list):
data = [
self.image_list_inmemory[self.image_map[item]] for item in item_list
]
data = np.array(data)
source_id = [
self.label_list[self.idx_map[item]][ssd_constants.IDX]
for item in item_list
]
raw_shape = [
self.label_list[self.idx_map[item]][ssd_constants.RAW_SHAPE]
for item in item_list
]
raw_shape = np.array(raw_shape)
return (data, source_id, raw_shape), self.label_list[item_list]
def get_image_list_inmemory(self):
return np.array(self.image_list_inmemory)
def get_indices(self, item_list):
data = [self.image_map[item] for item in item_list]
source_id = [
self.label_list[self.idx_map[item]][ssd_constants.IDX]
for item in item_list
]
raw_shape = [
self.label_list[self.idx_map[item]][ssd_constants.RAW_SHAPE]
for item in item_list
]
raw_shape = np.array(raw_shape)
return (data, source_id, raw_shape), self.label_list[item_list]
def get_item_loc(self, index):
raise NotImplementedError("Dataset:get_item_loc")
class COCODataset(Dataset):
def __init__(self,
data_path,
image_list,
name,
use_cache=0,
image_format="NCHW",
count=None,
cache_dir=None,
annotation_file=None,
use_space_to_depth=False):
super(COCODataset, self).__init__()
if not cache_dir:
cache_dir = os.getcwd()
self.image_list = []
self.label_list = []
self.count = count
self.use_cache = use_cache
self.cache_dir = os.path.join(cache_dir, "preprocessed", name, image_format)
self.data_path = data_path
self.space_to_depth = use_space_to_depth
# input images are in HWC
self.need_transpose = True if image_format == "NCHW" else False
self.annotation_file = annotation_file
not_found = 0
tf.gfile.MakeDirs(self.cache_dir)
start = time.time()
ssd_dataloader = dataloader.SSDInputReader(
data_path,
transpose_input=self.need_transpose,
space_to_depth=self.space_to_depth)
dataset = ssd_dataloader()
self.images = {}
self.idx_map = {}
with tf.gfile.Open(self.annotation_file, "r") as f:
coco = json.load(f)
for idx, i in enumerate(coco["images"]):
self.images[i["id"]] = {
"file_name": i["file_name"],
ssd_constants.IDX: idx
}
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
item = 0
with tf.Session() as sess:
sess.run(iterator.initializer)
while True:
try:
labels = sess.run(next_element)
except tf.errors.OutOfRangeError:
break
labels[ssd_constants.IDX] = self.images[labels[
ssd_constants.SOURCE_ID]][ssd_constants.IDX]
image_name = labels[ssd_constants.SOURCE_ID]
dst = os.path.join(self.cache_dir, str(image_name) + ".npy")
with tf.gfile.Open(dst, "wb") as fout:
np.save(fout, labels[ssd_constants.IMAGE])
labels.pop(ssd_constants.IMAGE)
self.image_list.append(str(image_name))
self.label_list.append(labels)
self.idx_map[labels[ssd_constants.IDX]] = item
item = item + 1
# limit the dataset if requested
if self.count and len(self.image_list) >= self.count:
break
time_taken = time.time() - start
if not self.image_list:
log.error("no images in image list found")
raise ValueError("no images in image list found")
if not_found > 0:
log.info("reduced image list, %d images not found", not_found)
log.info("loaded {} images, cache={}, took={:.1f}sec".format(
len(self.image_list), use_cache, time_taken))
self.label_list = np.array(self.label_list)
def get_item(self, nr):
"""Get image by number in the list."""
dst = os.path.join(self.cache_dir,
self.image_list[self.idx_map[nr]] + ".npy")
with tf.gfile.Open(dst, "rb") as fout:
img = np.load(fout)
return img, self.label_list[self.idx_map[nr]]  # callers unpack (img, label); see load_query_samples
import numpy as np
class Perceptron(object):
def __init__(self, eta=0.01, n_iter=50, random_state=1):
# learning rate
self.eta = eta
# number of iterate
self.n_iter = n_iter
# random seed: reproducibility
self.random_state = random_state
def fit(self, X, y):
# set initial weights
rgen = np.random.RandomState(self.random_state)
self.weight_ = rgen.normal(loc=0.0, scale=0.01, size=1+X.shape[1])
# classification errors
self.errors_ = []
for _ in range(self.n_iter):
cnt = 0
for xrow, teacher in zip(X,y):
# calc delta_weight and update
update = self.eta * (teacher-self.predict(xrow))
self.weight_[1:] += update*xrow
self.weight_[0] += update
# count errors
cnt+=int(update != 0.0)
self.errors_.append(cnt)
return self
# cover X*w_colvec and x_rowvec*w_colvec
def matrix_product(self, X):
return self.weight_[0] + np.dot(X, self.weight_[1:])
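# The file is truncated before predict(), which fit() above relies on. A
# standard completion (assumption) thresholds the net input at zero:
def _predict(self, X):
    # classify as 1 where w.x + b >= 0, else -1
    return np.where(self.matrix_product(X) >= 0.0, 1, -1)

Perceptron.predict = _predict  # attach the assumed method to the class above

# Tiny demo on linearly separable data with labels in {-1, 1}:
X_demo = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
y_demo = np.array([-1, -1, 1, 1])
model = Perceptron(eta=0.1, n_iter=10).fit(X_demo, y_demo)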
# Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.v2.src.initializers."""
import itertools
from absl.testing import parameterized
import numpy as np
from sonnet.src import initializers
from sonnet.src import test_utils
import tensorflow as tf
class InitializersTest(test_utils.TestCase, parameterized.TestCase):
def assertDifferentInitializerValues(self,
init,
shape=None,
dtype=tf.float32):
if shape is None:
shape = (100,)
t1 = self.evaluate(init(shape, dtype))
t2 = self.evaluate(init(shape, dtype))
self.assertEqual(t1.shape, shape)
self.assertEqual(t2.shape, shape)
self.assertFalse(np.allclose(t1, t2, rtol=1e-15, atol=1e-15))
def assertRange(self,
init,
shape,
target_mean=None,
target_std=None,
target_max=None,
target_min=None,
dtype=tf.float32):
output = self.evaluate(init(shape, dtype))
self.assertEqual(output.shape, shape)
lim = 4e-2
if target_std is not None:
self.assertNear(output.std(), target_std, err=lim)
if target_mean is not None:
self.assertNear(output.mean(), target_mean, err=lim)
if target_max is not None:
self.assertNear(output.max(), target_max, err=lim)
if target_min is not None:
self.assertNear(output.min(), target_min, err=lim)
class ConstantInitializersTest(InitializersTest):
@parameterized.parameters(tf.float32, tf.int32)
def testZeros(self, dtype):
self.assertRange(
initializers.Zeros(),
shape=(4, 5),
target_mean=0.,
target_max=0.,
dtype=dtype)
@parameterized.parameters(tf.float32, tf.int32)
def testOnes(self, dtype):
self.assertRange(
initializers.Ones(),
shape=(4, 5),
target_mean=1.,
target_max=1.,
dtype=dtype)
@parameterized.named_parameters(
("Tensor", lambda: tf.constant([1.0, 2.0, 3.0]), "Tensor"),
("Variable", lambda: tf.Variable([3.0, 2.0, 1.0]), "Variable"),
("List", lambda: [], "list"), ("Tuple", lambda: (), "tuple"))
def testConstantInvalidValue(self, value, value_type):
with self.assertRaisesRegex(
TypeError, r"Invalid type for value: .*{}.*".format(value_type)):
initializers.Constant(value())
@parameterized.parameters((42, tf.float32), (42.0, tf.float32),
(42, tf.int32))
def testConstantValidValue(self, value, dtype):
self.assertRange(
initializers.Constant(value),
shape=(4, 5),
target_mean=42.,
target_max=42.,
dtype=dtype)
@parameterized.parameters(initializers.Zeros, initializers.Ones)
def testInvalidDataType(self, initializer):
init = initializer()
with self.assertRaisesRegex(
ValueError, r"Expected integer or floating point type, got "):
init([1], dtype=tf.string)
def testInvalidDataTypeConstant(self):
init = initializers.Constant(0)
with self.assertRaisesRegex(
ValueError, r"Expected integer or floating point type, got "):
init([1], dtype=tf.string)
def testTFFunction(self):
init = initializers.Constant(2)
f = tf.function(lambda t: init(tf.shape(t), t.dtype))
expected = init([7, 4], tf.float32)
x = f(tf.zeros([7, 4]))
self.assertAllEqual(expected, x)
def testBatchAgnostic(self):
init = initializers.Constant(2)
spec = tf.TensorSpec(shape=[None, None])
f = tf.function(lambda t: init(tf.shape(t), t.dtype))
f = f.get_concrete_function(spec)
expected = init([7, 4], tf.float32)
x = f(tf.ones([7, 4]))
self.assertAllEqual(expected, x)
class RandomUniformInitializerTest(InitializersTest):
def testRangeInitializer(self):
shape = (16, 8, 128)
self.assertRange(
initializers.RandomUniform(minval=-1., maxval=1., seed=124.),
shape,
target_mean=0.,
target_max=1,
target_min=-1)
@parameterized.parameters(tf.float32, tf.int32)
def testDifferentInitializer(self, dtype):
init = initializers.RandomUniform(0, 10)
self.assertDifferentInitializerValues(init, dtype=dtype)
def testInvalidDataType(self):
init = initializers.RandomUniform()
with self.assertRaisesRegex(
ValueError, r"Expected integer or floating point type, got "):
init([1], dtype=tf.string)
def testTFFunction(self):
init = initializers.RandomUniform(seed=42)
f = tf.function(lambda t: init(tf.shape(t), t.dtype))
expected = init([7, 4], tf.float32)
x = f(tf.zeros([7, 4]))
self.assertEqual(x.shape, [7, 4])
if self.primary_device != "TPU": # Seeds don't work as expected on TPU
self.assertAllEqual(expected, x)
def testBatchAgnostic(self):
init = initializers.RandomUniform(seed=42)
spec = tf.TensorSpec(shape=[None, None])
f = tf.function(lambda t: init(tf.shape(t), t.dtype))
f = f.get_concrete_function(spec)
expected = init([7, 4], tf.float32)
x = f(tf.ones([7, 4]))
self.assertEqual(x.shape, [7, 4])
if self.primary_device != "TPU": # Seeds don't work as expected on TPU
self.assertAllEqual(expected, x)
class RandomNormalInitializerTest(InitializersTest):
def testRangeInitializer(self):
self.assertRange(
initializers.RandomNormal(mean=0, stddev=1, seed=153),
shape=(16, 8, 128),
target_mean=0.,
target_std=1)
def testDifferentInitializer(self):
init = initializers.RandomNormal(0.0, 1.0)
self.assertDifferentInitializerValues(init)
@parameterized.parameters(tf.int32, tf.string)
def testInvalidDataType(self, dtype):
init = initializers.RandomNormal(0.0, 1.0)
with self.assertRaisesRegex(ValueError,
r"Expected floating point type, got "):
init([1], dtype=dtype)
def testTFFunction(self):
init = initializers.RandomNormal(seed=42)
f = tf.function(lambda t: init(tf.shape(t), t.dtype))
expected = init([7, 4], tf.float32)
x = f(tf.zeros([7, 4]))
self.assertEqual(x.shape, [7, 4])
if self.primary_device != "TPU": # Seeds don't work as expected on TPU
self.assertAllEqual(expected, x)
def testBatchAgnostic(self):
init = initializers.RandomNormal(seed=42)
spec = tf.TensorSpec(shape=[None, None])
f = tf.function(lambda t: init(tf.shape(t), t.dtype))
f = f.get_concrete_function(spec)
expected = init([7, 4], tf.float32)
x = f(tf.ones([7, 4]))
self.assertEqual(x.shape, [7, 4])
if self.primary_device != "TPU": # Seeds don't work as expected on TPU
self.assertAllEqual(expected, x)
class TruncatedNormalInitializerTest(InitializersTest):
def testRangeInitializer(self):
self.assertRange(
initializers.TruncatedNormal(mean=0, stddev=1, seed=126),
shape=(16, 8, 128),
target_mean=0.,
target_max=2,
target_min=-2)
def testDifferentInitializer(self):
init = initializers.TruncatedNormal(0.0, 1.0)
self.assertDifferentInitializerValues(init)
@parameterized.parameters(tf.int32, tf.string)
def testInvalidDataType(self, dtype):
init = initializers.TruncatedNormal(0.0, 1.0)
with self.assertRaisesRegex(ValueError,
r"Expected floating point type, got "):
init([1], dtype=dtype)
def testTFFunction(self):
init = initializers.TruncatedNormal(seed=42)
f = tf.function(lambda t: init(tf.shape(t), t.dtype))
expected = init([7, 4], tf.float32)
x = f(tf.zeros([7, 4]))
self.assertEqual(x.shape, [7, 4])
if self.primary_device != "TPU": # Seeds don't work as expected on TPU
self.assertAllEqual(expected, x)
def testBatchAgnostic(self):
init = initializers.TruncatedNormal(seed=42)
spec = tf.TensorSpec(shape=[None, None])
f = tf.function(lambda t: init(tf.shape(t), t.dtype))
f = f.get_concrete_function(spec)
expected = init([7, 4], tf.float32)
x = f(tf.ones([7, 4]))
self.assertEqual(x.shape, [7, 4])
if self.primary_device != "TPU": # Seeds don't work as expected on TPU
self.assertAllEqual(expected, x)
class IdentityInitializerTest(InitializersTest):
@parameterized.parameters(
*itertools.product([(4, 5), (3, 3), (3, 4, 5),
(6, 2, 3, 3)], [3, 1], [tf.float32, tf.int32]))
def testRange(self, shape, gain, dtype):
if self.primary_device == "GPU" and dtype == tf.int32:
self.skipTest("tf.int32 not supported on GPU")
self.assertRange(
initializers.Identity(gain),
shape=shape,
target_mean=gain / shape[-1],
target_max=gain,
dtype=dtype)
def testInvalidDataType(self):
init = initializers.Identity()
with self.assertRaisesRegex(
ValueError, r"Expected integer or floating point type, got "):
init([1, 2], dtype=tf.string)
@parameterized.parameters(tf.float32, tf.int32)
def testInvalidShape(self, dtype):
init = initializers.Identity()
with self.assertRaisesRegex(
ValueError,
"The tensor to initialize must be at least two-dimensional"):
init([1], dtype=dtype)
def testTFFunction(self):
init = initializers.Identity()
f = tf.function(lambda t: init(tf.shape(t), t.dtype))
expected = init([4, 4], tf.float32)
x = f(tf.ones([4, 4]))
self.assertAllEqual(expected, x)
def testTFFunction4D(self):
init = initializers.Identity()
f = tf.function(lambda t: init(tf.shape(t), t.dtype))
expected = init([4, 4, 3, 2], tf.float32)
x = f(tf.ones([4, 4, 3, 2]))
self.assertAllEqual(expected, x)
def testBatchAgnostic(self):
init = initializers.Identity()
spec = tf.TensorSpec(shape=[None, None])
f = tf.function(lambda t: init(tf.shape(t), t.dtype))
f = f.get_concrete_function(spec)
expected = init([7, 4], tf.float32)
x = f(tf.ones([7, 4]))
self.assertAllEqual(expected, x)
class OrthogonalInitializerTest(InitializersTest):
def testRangeInitializer(self):
self.assertRange(
initializers.Orthogonal(seed=123), shape=(20, 20), target_mean=0.)
def testDuplicatedInitializer(self):
init = initializers.Orthogonal()
self.assertDifferentInitializerValues(init, (10, 10))
@parameterized.parameters(tf.int32, tf.string)
def testInvalidDataType(self, dtype):
init = initializers.Orthogonal()
with self.assertRaisesRegex(ValueError,
r"Expected floating point type, got "):
init([1, 2], dtype=dtype)
def testInvalidShape(self):
init = initializers.Orthogonal()
with self.assertRaisesRegex(
ValueError,
"The tensor to initialize must be at least two-dimensional"):
init([1], tf.float32)
@parameterized.named_parameters(
("Square", (10, 10)), ("3DSquare", (100, 5, 5)),
("3DRectangle", (10, 9, 8)), ("TallRectangle", (50, 40)),
("WideRectangle", (40, 50)))
def testShapesValues(self, shape):
init = initializers.Orthogonal()
tol = 1e-5
t = self.evaluate(init(shape, tf.float32))
self.assertAllEqual(tuple(shape), t.shape)
# Check orthogonality by computing the inner product
t = t.reshape((np.prod(t.shape[:-1]), t.shape[-1]))
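# Assumed continuation of the truncated test: verify orthogonality via the
# Gram matrix (Q^T Q = I for tall matrices, Q Q^T = I for wide ones), e.g.:
# if t.shape[0] > t.shape[1]:
#     self.assertAllClose(np.dot(t.T, t), np.eye(t.shape[1]), rtol=tol, atol=tol)
# else:
#     self.assertAllClose(np.dot(t, t.T), np.eye(t.shape[0]), rtol=tol, atol=tol)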
#
# This file is part of the FFEA simulation package
#
# Copyright (c) by the Theory and Development FFEA teams,
# as they appear in the README.md file.
#
# FFEA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FFEA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FFEA. If not, see <http://www.gnu.org/licenses/>.
#
# To help us fund FFEA development, we humbly ask that you cite
# the research papers on the package.
#
import sys, os
from time import sleep
import numpy as np
from FFEA_exceptions import *
class FFEA_node:
def __init__(self, fname = "", scale = 1.0, frame = 0):
self.reset()
self.scale = scale
if fname == "":
self.valid = True
if not "pymol" in sys.modules.keys() and not "FFEA_trajectory" in sys.modules.keys():
sys.stdout.write("Empty node object initialised.\n")
return
try:
self.load(fname)
except FFEAFormatError as e:
self.reset()
print_error()
print("Formatting error at line " + e.lin + "\nLine(s) should be formatted as follows:\n\n" + e.lstr)
raise
except FFEAIOError as e:
self.reset()
print_error()
print("Input error for file " + e.fname)
if e.fext != [""]:
print(" Acceptable file types:")
for ext in e.fext:
print(" " + ext)
except IOError:
raise
def load(self, fname, findex = 0):
sys.stdout.write("Loading FFEA node file...")
# File format?
base, ext = os.path.splitext(fname)
try:
if ext == ".node":
# Check if tetgen
with open(fname, "r") as fin:
line = fin.readline().strip()
if line == "ffea node file" or line == "walrus node file":
self.load_FFEA_node(fname)
elif len(line.split()) == 4:
self.load_tetgen_node(fname)
else:
raise FFEAFormatError(lin=1)
elif ext == ".out" or ext == ".traj" or ext == ".ftj":
self.load_traj(fname, findex)
elif ext == ".obj":
self.load_obj(fname)
elif ext == ".vol":
self.load_vol(fname)
else:
raise FFEAIOError(fname=fname, fext=[".node", ".out", ".traj", ".ftj", ".vol", ".obj"])
except:
raise
self.valid = True
self.empty = False
sys.stdout.write("done!\n")
def load_FFEA_node(self, fname):
# Open file
try:
fin = open(fname, "r")
except(IOError):
raise
# Test format
line = fin.readline().strip()
if line != "ffea node file" and line != "walrus node file":
raise FFEAFormatError(lin=1, lstr="ffea node file")
try:
num_nodes = int(fin.readline().split()[1])
num_surface_nodes = int(fin.readline().split()[1])
num_interior_nodes = int(fin.readline().split()[1])
except IndexError:
raise FFEAFormatError(lin="2-4", lstr="num_nodes %d\nnum_surface_nodes %d\nnum_interior_nodes %d")
if fin.readline().strip() != "surface nodes:":
raise FFEAFormatError(lin="5", lstr="surface nodes:")
# Read nodes now
try:
j = 0
for i in range(num_surface_nodes):
sline = fin.readline().split()
n = [self.scale * float(sline[0]), self.scale * float(sline[1]), self.scale * float(sline[2])]
self.add_node(n, nodetype = 0)
if fin.readline().strip() != "interior nodes:":
if num_interior_nodes != 0:
raise FFEAFormatError(lin=num_surface_nodes + 6, lstr="interior nodes:")
i = num_surface_nodes
for j in range(num_interior_nodes):
sline = fin.readline().split()
n = [self.scale * float(sline[0]), self.scale * float(sline[1]), self.scale * float(sline[2])]
self.add_node(n, nodetype = 1)
except (IndexError, ValueError):
raise FFEAFormatError(lin=i+j+6, lstr="%f %f %f")
except:
raise
fin.close()
# Numpy it up, for speed
self.pos = np.array(self.pos)
import numpy as np
import xarray as xr
### Save scalar to VTK (version 1) files
def da2vtk1(da, filename):
vals = da.transpose('z','y','x').values
header = """# vtk DataFile Version 1.0
vtk output
BINARY
DATASET STRUCTURED_POINTS
DIMENSIONS %d %d %d
ASPECT_RATIO %f %f %f
ORIGIN %f %f %f
POINT_DATA %d
SCALARS %s float
LOOKUP_TABLE default
""" % (da.x.shape[0],da.y.shape[0],da.z.shape[0],
(np.nanmax(da.x.values)-np.nanmin(da.x.values))/(da.x.shape[0]-1),
(np.nanmax(da.y.values)-np.nanmin(da.y.values))/(da.y.shape[0]-1),
(np.nanmax(da.z.values)-np.nanmin(da.z.values))/(da.z.shape[0]-1),
np.nanmin(da.x.values),
np.nanmin(da.y.values),
np.nanmin(da.z.values),
da.x.shape[0]*da.y.shape[0]*da.z.shape[0],
da.name)
with open(filename, 'wb') as f:
f.write(bytes(header,'utf-8'))
np.array(vals, dtype=np.float32).byteswap().tofile(f)
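# Minimal usage sketch for da2vtk1 (illustrative names and sizes):
# da = xr.DataArray(np.random.rand(4, 5, 6),
#                   coords={'z': np.arange(4), 'y': np.arange(5), 'x': np.arange(6)},
#                   dims=('z', 'y', 'x'), name='density')
# da2vtk1(da, 'density.vtk')  # writes a binary VTK v1 STRUCTURED_POINTS file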
### Save vector with components (i,j,k) to VTK (version 4.2) binary files
# ds2vtk3(ds, 'velocity', fname + '.vtk')
def ds2vtk3(ds, name, filename):
da = ds.transpose('z','y','x')
header = """# vtk DataFile Version 4.2
vtk output
BINARY
DATASET STRUCTURED_POINTS
DIMENSIONS %d %d %d
SPACING %f %f %f
ORIGIN %f %f %f
POINT_DATA %d
VECTORS %s float
""" % (da.x.shape[0],da.y.shape[0],da.z.shape[0],
(np.nanmax(da.x.values)-np.nanmin(da.x.values))/(da.x.shape[0]-1),
(np.nanmax(da.y.values)-np.nanmin(da.y.values))/(da.y.shape[0]-1),
(np.nanmax(da.z.values)-np.nanmin(da.z.values))/(da.z.shape[0]-1),
np.nanmin(da.x.values),
np.nanmin(da.y.values),
np.nanmin(da.z.values),
da.x.shape[0]*da.y.shape[0]*da.z.shape[0],
name)
with open(filename, 'wb') as f:
f.write(bytes(header,'utf-8'))
# assumed continuation: interleave the (i, j, k) components point-wise, as VTK expects
vals = np.stack([da['i'].values, da['j'].values, da['k'].values], axis=-1)
np.array(vals, dtype=np.float32).byteswap().tofile(f)
import numpy as np
# Function that creates a collection of allowed masks
def create_strided_masks(mask_size=20, stride=5, img_size=64):
# Number of masks
num_masks = (img_size-mask_size) // stride + 1
# Leftover space
leftover_space = 2
# Empty masks
out_masks = np.zeros((num_masks, num_masks, img_size, img_size, 3))
# Populate in both dimensions
for h_mask_idx in range(num_masks):
for v_mask_idx in range(num_masks):
out_masks[h_mask_idx, v_mask_idx,
(leftover_space+stride*h_mask_idx):(leftover_space+stride*h_mask_idx+mask_size),
(leftover_space+stride*v_mask_idx):(leftover_space+stride*v_mask_idx+mask_size), :] = 1.
# Flatten
out_masks = np.reshape(out_masks, (-1, img_size, img_size, 3))
return out_masks
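# Worked example: with the defaults (mask_size=20, stride=5, img_size=64) there
# are (64 - 20) // 5 + 1 = 9 positions per axis, i.e. 9 * 9 = 81 masks after
# flattening, each of shape (64, 64, 3):
# masks = create_strided_masks()  # masks.shape == (81, 64, 64, 3)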
# Function that integrates gradient over a set of masks, picks top C candidates,
# performs a forward pass, then select a final winner
def compute_gradient_magnitudes(grads, images, masks, model, anchors, minimize=True, C=5, img_size=64):
# Get number of queries
num_images = len(grads)
# Output masks
subwinner_masks = np.zeros((num_images, C, img_size, img_size, 3))
subwinner_images = np.zeros((num_images, C, img_size, img_size, 3))
# Square grads
squared_grads = np.square(grads)
# For each image, integrate and sort
for image_idx in range(num_images):
# MSE trick
grad_sums = np.sum(squared_grads[image_idx][None, :] * masks, axis=(-1, -2, -3))
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Modified code based on: https://github.com/eriklindernoren/Keras-GAN/blob/master/cyclegan/data_loader.py
from keras.preprocessing.image import ImageDataGenerator
import scipy
from glob import glob
import numpy as np
import os
class DataLoader:
def __init__(self, dataset_name, img_res=(128, 128)):
self.dataset_name = dataset_name
self.img_res = img_res
def load_data(self, domain, batch_size=1, is_testing=False):
data_type = "train%s" % domain if not is_testing else "test%s" % domain
path = glob('%s/%s/*' % (self.dataset_name, data_type))
batch_images = np.random.choice(path, size=batch_size)
imgs = []
for img_path in batch_images:
img = self.imread(img_path)
if not is_testing:
img = scipy.misc.imresize(img, self.img_res)
if np.random.random() > 0.5:
img = np.fliplr(img)
else:
img = scipy.misc.imresize(img, self.img_res)
imgs.append(img)
imgs = np.array(imgs)/127.5 - 1.
return imgs
def load_k_data(self, domain, image_number=10, is_testing=False):
data_type = "train%s" % domain if not is_testing else "test%s" % domain
path = glob('%s/%s/*' % (self.dataset_name, data_type))
images = np.array(path)
if image_number < len(path):
images = images[:image_number]
imgs = []
for img_path in images:
img = self.imread(img_path)
if not is_testing:
img = scipy.misc.imresize(img, self.img_res)
if np.random.random() > 0.5:
img = np.fliplr(img)
else:
img = scipy.misc.imresize(img, self.img_res)
imgs.append(img)
imgs = np.array(imgs) / 127.5 - 1.
return imgs
def load_batch(self, batch_size=1, is_testing=False, aug=False):
data_type = "train" if not is_testing else "test"
path_A = glob('%s/%sA/*' % (self.dataset_name, data_type))
path_B = glob('%s/%sB/*' % (self.dataset_name, data_type))
if not is_testing and aug:
datagen = ImageDataGenerator(
zoom_range=0.2,
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
brightness_range=[0.5, 1.5]
)
self.n_batches = int(min(len(path_A), len(path_B)) / batch_size)
total_samples = self.n_batches * batch_size
# Sample n_batches * batch_size from each path list so that model sees all
# samples from both domains
path_A = np.random.choice(path_A, total_samples, replace=False)
path_B = np.random.choice(path_B, total_samples, replace=False)
for i in range(self.n_batches-1):
batch_A = path_A[i*batch_size:(i+1)*batch_size]
batch_B = path_B[i*batch_size:(i+1)*batch_size]
imgs_A, imgs_B = [], []
for img_A, img_B in zip(batch_A, batch_B):
img_A = self.imread(img_A)
img_B = self.imread(img_B)
img_A = scipy.misc.imresize(img_A, self.img_res)
img_B = scipy.misc.imresize(img_B, self.img_res)
# Data augmenting
if not is_testing and aug:
img_A = datagen.random_transform(img_A)
img_B = datagen.random_transform(img_B)
imgs_A.append(img_A)
imgs_B.append(img_B)
imgs_A = np.array(imgs_A)/127.5 - 1.
imgs_B = np.array(imgs_B)/127.5 - 1.
yield imgs_A, imgs_B
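# Usage sketch (directory layout assumed: <dataset_name>/{trainA,trainB,testA,testB}):
# loader = DataLoader('datasets/horse2zebra', img_res=(128, 128))
# for imgs_A, imgs_B in loader.load_batch(batch_size=4, aug=True):
#     ...  # float arrays scaled to [-1, 1], shape (4, 128, 128, 3)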
# -*- coding: utf-8 -*-
"""
Functions for estimating electricity prices, eeg levies, remunerations and other components, based on customer type and annual demand
@author: Abuzar and Shakhawat
"""
from typing import ValuesView
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
from scipy.interpolate import InterpolatedUnivariateSpline
def calculate_mean_price(customer_type, val_yearly_demand):
"""
Parameters
----------
customer_type : Type of customer, differentiated between household and industrial customers
total_demand : yearly electricity demand for household customers in KWh/y and for industrial customers in MWh/y
Returns
-------
mean_price: average price for the customer for the next year in cents/kWh
"""
def plotting(x,y, title, x_label, y_label, name_plot):
fig = plt.figure()
values = x
plt.plot (x,y)
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.xticks(x,values)
plt.xticks(rotation = 45)
fig.savefig(name_plot, dpi=fig.dpi)
def haupt_tarif(data):
#haupt_tarrif = df_with_data
df_with_data = pd.read_excel(data)
yearly_mean = df_with_data.price.mean()
haupt_tarrif = df_with_data[df_with_data["hour"].isin([8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]) & df_with_data["Day"].isin(['Wednesday', 'Thursday', 'Friday', 'Monday', 'Tuesday'])]
cond = df_with_data['hour'].isin(haupt_tarrif['hour'])
df_with_data.drop(haupt_tarrif[cond].index, inplace = True)
ht_factor = haupt_tarrif.price.mean()/yearly_mean
return ht_factor
def neben_tarif(data):
#neben_tarrif = df_with_data
df_with_data = pd.read_excel(data)
yearly_mean = df_with_data.price.mean()
neben_tarrif = df_with_data[(df_with_data["hour"].isin([1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23, 24]) & df_with_data["Day"].isin(['Wednesday', 'Thursday', 'Friday', 'Monday', 'Tuesday'])) |(df_with_data["Day"].isin(['Saturday', 'Sunday']))]
neben_tarrif.head()
cond = df_with_data['hour'].isin(neben_tarrif['hour'])
df_with_data.drop(neben_tarrif[cond].index, inplace = True)
nt_factor = neben_tarrif.price.mean()/yearly_mean
return nt_factor
ht_factor = haupt_tarif("ht_nt_price.xlsx")
nt_factor = neben_tarif("ht_nt_price.xlsx")
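# Worked example of how these factors are applied below: with a yearly mean
# price of 30 ct/kWh and, say, ht_factor = 1.15 and nt_factor = 0.85, the
# estimated peak (HT) price is 30 * 1.15 = 34.5 ct/kWh and the off-peak (NT)
# price is 30 * 0.85 = 25.5 ct/kWh. The factor values are illustrative only.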
#industrial 2000 - 20000 MWh
industrie_prices_without_VAT = pd.read_excel(r'Energiepreisentwicklung.xlsx',sheet_name='5.8.3 Strom - € - Industrie', skiprows = 5, nrows = 26, index_col = 0)
industrie_prices_without_VAT = industrie_prices_without_VAT.iloc[:,0]
industrie_prices_without_VAT = industrie_prices_without_VAT.reset_index()
industrie_prices_without_VAT["index"]= industrie_prices_without_VAT["index"].str.slice(start = 5)
industrie_prices_without_VAT.columns = ["year","price"]
industrie_prices_without_VAT = industrie_prices_without_VAT.set_index("year")
industrie_prices_without_VAT.index = industrie_prices_without_VAT.index.astype(str)
industrie_prices_without_VAT.index = pd.to_datetime(industrie_prices_without_VAT.index, errors='ignore')
industrie_prices_without_VAT = industrie_prices_without_VAT.astype(float)
industrie_prices_without_VAT = industrie_prices_without_VAT.resample('12M').mean()
industrie_prices_without_VAT.index = industrie_prices_without_VAT.index.astype(str)
industrie_prices_without_VAT.index= industrie_prices_without_VAT.index.str.slice(start = 0, stop = -6)
ht_industrie_prices_without_VAT = industrie_prices_without_VAT.price * ht_factor
nt_industrie_prices_without_VAT = industrie_prices_without_VAT.price * nt_factor
ht_industrie_prices_without_VAT = ht_industrie_prices_without_VAT.reset_index()
nt_industrie_prices_without_VAT = nt_industrie_prices_without_VAT.reset_index()
industrie_prices_without_VAT = industrie_prices_without_VAT.reset_index()
industrie_prices_without_VAT = industrie_prices_without_VAT[industrie_prices_without_VAT.year >= str(2016)]
#industrial prices > 150000 MWh/y
v_big_industrial_prices_BDEW = {'year': range(2019,2021), 'price': [3.77,3.05]}
v_big_industrial_prices_BDEW = pd.DataFrame(data=v_big_industrial_prices_BDEW)
v_big_industrial_prices_BDEW
#industrial prices between 70000-150000 MWh/y
big_industrial_prices_BDEW = {'year': range(2016,2021), 'price': [8.37, 9.96, 8.96, 9.28, 10.07]}
big_industrial_prices_BDEW = pd.DataFrame(data=big_industrial_prices_BDEW)
big_industrial_prices_BDEW
#industrial prices between 20000-70000 MWh/y
mid_industrie_prices = pd.read_excel(r'mid_size_industrial_prices.xlsx')
mid_industrie_prices.columns = ['year', 'price']
mid_industrie_prices
#household electricity prices between 2500-5000 KWh/y
household_prices_without_VAT = pd.read_excel(r'Energiepreisentwicklung.xlsx',sheet_name='5.8.2 Strom - € - Haushalte', skiprows = 5, nrows = 26, index_col = 0)
household_prices_without_VAT = household_prices_without_VAT.iloc[:,0]
household_prices_without_VAT = household_prices_without_VAT.reset_index()
household_prices_without_VAT["index"]= household_prices_without_VAT["index"].str.slice(start = 5)
household_prices_without_VAT.columns = ["year","price"]
household_prices_without_VAT = household_prices_without_VAT.set_index("year")
household_prices_without_VAT.index = household_prices_without_VAT.index.astype(str)
household_prices_without_VAT.index = pd.to_datetime(household_prices_without_VAT.index, errors='ignore')
household_prices_without_VAT = household_prices_without_VAT.astype(float)
household_prices_without_VAT = household_prices_without_VAT.resample('12M').mean()
household_prices_without_VAT.index = household_prices_without_VAT.index.astype(str)
household_prices_without_VAT.index= household_prices_without_VAT.index.str.slice(start = 0, stop = -6)
household_prices_without_VAT = household_prices_without_VAT[6:].reset_index()
household_prices_without_VAT = household_prices_without_VAT[household_prices_without_VAT.year >= str(2016)]
household_prices_without_VAT
if ((customer_type == 0) & ((val_yearly_demand >= 2500) & (val_yearly_demand <= 5000))):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
# ht_industrie_prices_without_VAT = household_prices
ht_household_prices_without_VAT = household_prices_without_VAT
ht_household_prices_without_VAT["year"] = ht_household_prices_without_VAT["year"].astype(int)
ht_year = ht_household_prices_without_VAT["year"]
ht_price = ht_household_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
nt_household_prices_without_VAT = household_prices_without_VAT
nt_household_prices_without_VAT["year"] = nt_household_prices_without_VAT["year"].astype(int)
nt_year = nt_household_prices_without_VAT["year"]
nt_price = nt_household_prices_without_VAT["price"] * nt_factor
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
yt_household_prices_without_VAT = household_prices_without_VAT
yt_household_prices_without_VAT["year"] = yt_household_prices_without_VAT["year"].astype(int)
yt_year = yt_household_prices_without_VAT["year"]
yt_price = yt_household_prices_without_VAT["price"]
yt_new_year = np.append(yt_year, 2021)
yt_new_price = np.append(yt_price, (val1))
print(yt_new_year)
print(yt_new_price)
plotting(yt_new_year, yt_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val == 1):
yt_household_prices_without_VAT = household_prices_without_VAT
yt_household_prices_without_VAT["year"] = yt_household_prices_without_VAT["year"].astype(int)
yt_year = yt_household_prices_without_VAT["year"]
yt_price = yt_household_prices_without_VAT["price"]
f = interpolate.interp1d(yt_year, yt_price, fill_value = "extrapolate")
p_2021 = f(2021)
yt_new_year = np.append(yt_year, 2021)
yt_new_price = np.append(yt_price, (f(2021)))
# ht_new_price = ht_new_price * ht_factor
print(yt_new_year)
print(yt_new_price)
plotting(yt_new_year, yt_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif ((customer_type == 1) & (val_yearly_demand > 0) & (val_yearly_demand < 2000)):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
ht_household_prices_without_VAT = household_prices_without_VAT
ht_household_prices_without_VAT["year"] = ht_household_prices_without_VAT["year"].astype(int)
ht_year = ht_household_prices_without_VAT["year"]
ht_price = ht_household_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
nt_industrie_prices_without_VAT = household_prices_without_VAT
nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
nt_year = nt_industrie_prices_without_VAT["year"]
nt_price = nt_industrie_prices_without_VAT["price"] * nt_factor
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
ht_industrie_prices_without_VAT = household_prices_without_VAT
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (val1))
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
elif (val == 1):
ht_industrie_prices_without_VAT = household_prices_without_VAT
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
f = interpolate.interp1d(ht_year, ht_price, fill_value = "extrapolate")
        p_2021 = f(2021)
        ht_new_year = np.append(ht_year, 2021)
        ht_new_price = np.append(ht_price, p_2021)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
elif ((customer_type == 1) & (val_yearly_demand >= 2000) & (val_yearly_demand <= 20000)):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
nt_year = nt_industrie_prices_without_VAT["year"]
nt_price = nt_industrie_prices_without_VAT["price"]
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price * nt_factor, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
ht_industrie_prices_without_VAT = industrie_prices_without_VAT
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (val1))
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
elif (val == 1):
ht_industrie_prices_without_VAT = industrie_prices_without_VAT
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
f = interpolate.interp1d(ht_year, ht_price, fill_value = "extrapolate")
        p_2021 = f(2021)
        ht_new_year = np.append(ht_year, 2021)
        ht_new_price = np.append(ht_price, p_2021)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
elif ((customer_type == 1) & (val_yearly_demand > 20000) & (val_yearly_demand <= 70000)):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
ht_industrie_prices_without_VAT = mid_industrie_prices
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
nt_industrie_prices_without_VAT = mid_industrie_prices
nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
nt_year = nt_industrie_prices_without_VAT["year"]
nt_price = nt_industrie_prices_without_VAT["price"] * nt_factor
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
ht_industrie_prices_without_VAT = mid_industrie_prices
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (val1))
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
elif (val == 1):
ht_industrie_prices_without_VAT = mid_industrie_prices
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
f = interpolate.interp1d(ht_year, ht_price, fill_value = "extrapolate")
        p_2021 = f(2021)
        ht_new_year = np.append(ht_year, 2021)
        ht_new_price = np.append(ht_price, p_2021)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
elif ((customer_type == 1) & (val_yearly_demand > 70000) & (val_yearly_demand <= 150000)):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
ht_industrie_prices_without_VAT = big_industrial_prices_BDEW
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
nt_industrie_prices_without_VAT = big_industrial_prices_BDEW
nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
nt_year = nt_industrie_prices_without_VAT["year"]
nt_price = nt_industrie_prices_without_VAT["price"] * nt_factor
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
ht_industrie_prices_without_VAT = big_industrial_prices_BDEW
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
elif (val == 1):
ht_industrie_prices_without_VAT = big_industrial_prices_BDEW
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
f = interpolate.interp1d(ht_year, ht_price, fill_value = "extrapolate")
        p_2021 = f(2021)
        ht_new_year = np.append(ht_year, 2021)
        ht_new_price = np.append(ht_price, p_2021)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
elif ((customer_type == 1) & (val_yearly_demand > 150000)):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
ht_industrie_prices_without_VAT = v_big_industrial_prices_BDEW
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"] * ht_factor
            ht_new_year = np.append(ht_year, 2021)
            ht_new_price = np.append(ht_price, val1)
            print(ht_new_year)
            print(ht_new_price)
            plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
            nt_industrie_prices_without_VAT = v_big_industrial_prices_BDEW
            nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
            nt_year = nt_industrie_prices_without_VAT["year"]
            nt_price = nt_industrie_prices_without_VAT["price"] * nt_factor
            nt_new_year = np.append(nt_year, 2021)
            nt_new_price = np.append(nt_price, val2)
            print(nt_new_year)
            print(nt_new_price)
            plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
# -*- coding: utf-8 -*-
import numpy as np
import neurokit2 as nk
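
# Tests for nk.ppg_simulate() and nk.ppg_clean().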
def test_ppg_simulate():
ppg1 = nk.ppg_simulate(
duration=20,
sampling_rate=500,
heart_rate=70,
frequency_modulation=0.3,
ibi_randomness=0.25,
drift=1,
motion_amplitude=0.5,
powerline_amplitude=0.1,
burst_amplitude=1,
burst_number=5,
random_state=42,
show=False,
)
assert ppg1.size == 20 * 500
ppg2 = nk.ppg_simulate(
duration=200,
sampling_rate=1000,
heart_rate=70,
frequency_modulation=0.3,
ibi_randomness=0.25,
drift=1,
motion_amplitude=0.5,
powerline_amplitude=0.1,
burst_amplitude=1,
burst_number=5,
random_state=42,
show=False,
)
assert ppg2.size == 200 * 1000
# Ensure that frequency_modulation does not affect other signal properties.
ppg3 = nk.ppg_simulate(
duration=200,
sampling_rate=1000,
heart_rate=70,
frequency_modulation=1,
ibi_randomness=0.25,
drift=1,
motion_amplitude=0.5,
powerline_amplitude=0.1,
burst_amplitude=1,
burst_number=5,
random_state=42,
show=False,
)
assert np.allclose((ppg2.mean() - ppg3.mean()), 0, atol=1e-2)
assert np.allclose((ppg2.std() - ppg3.std()), 0, atol=1e-2)
# Ensure that ibi_randomness does not affect other signal properties.
ppg4 = nk.ppg_simulate(
duration=200,
sampling_rate=1000,
heart_rate=70,
frequency_modulation=1,
ibi_randomness=1,
drift=1,
motion_amplitude=0.5,
powerline_amplitude=0.1,
burst_amplitude=1,
burst_number=5,
random_state=42,
show=False,
)
assert np.allclose((ppg3.mean() - ppg4.mean()), 0, atol=1e-1)
assert np.allclose((ppg3.std() - ppg4.std()), 0, atol=1e-1)
# TODO: test influence of different noise configurations
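

def test_ppg_simulate_powerline_noise():
    # A sketch toward the TODO above (a hypothetical test, not part of the
    # existing suite): with identical settings and random_state, raising
    # powerline_amplitude should add variance without changing the length.
    # Parameter names are the ones already used in the calls above.
    ppg_quiet = nk.ppg_simulate(duration=20, sampling_rate=500, heart_rate=70,
                                powerline_amplitude=0, random_state=42)
    ppg_noisy = nk.ppg_simulate(duration=20, sampling_rate=500, heart_rate=70,
                                powerline_amplitude=0.5, random_state=42)
    assert ppg_quiet.size == ppg_noisy.size == 20 * 500
    assert ppg_noisy.std() >= ppg_quiet.std()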
def test_ppg_clean():
sampling_rate = 500
ppg = nk.ppg_simulate(
duration=30,
sampling_rate=sampling_rate,
heart_rate=180,
frequency_modulation=0.01,
ibi_randomness=0.1,
drift=1,
motion_amplitude=0.5,
powerline_amplitude=0.1,
burst_amplitude=1,
burst_number=5,
random_state=42,
show=False,
)
ppg_cleaned_elgendi = nk.ppg_clean(ppg, sampling_rate=sampling_rate, method="elgendi")
assert ppg.size == ppg_cleaned_elgendi.size
# Assert that bandpass filter with .5 Hz lowcut and 8 Hz highcut was applied.
fft_raw = np.abs(np.fft.rfft(ppg))
fft_elgendi = np.abs(np.fft.rfft(ppg_cleaned_elgendi))
freqs = np.fft.rfftfreq(ppg.size, 1 / sampling_rate)
    assert np.sum(fft_raw[freqs < 0.5]) > np.sum(fft_elgendi[freqs < 0.5])
    assert np.sum(fft_raw[freqs > 8]) > np.sum(fft_elgendi[freqs > 8])
"""
Tests of neo.io.nixio_fr
"""
import numpy as np
import unittest
from quantities import s
from neo.io.nixio_fr import NixIO as NixIOfr
import quantities as pq
from neo.io.nixio import NixIO
from neo.test.iotest.common_io_test import BaseTestIO
from neo.core import Block, Segment, AnalogSignal, SpikeTrain, Event
try:
import nixio as nix
HAVE_NIX = True
except ImportError:
HAVE_NIX = False
import os
@unittest.skipUnless(HAVE_NIX, "Requires NIX")
class TestNixfr(BaseTestIO, unittest.TestCase):
ioclass = NixIOfr
entities_to_download = [
'nix/nixio_fr.nix'
]
entities_to_test = [
'nix/nixio_fr.nix'
]
def setUp(self):
super().setUp()
self.testfilename = self.get_local_path('nix/nixio_fr.nix')
self.reader_fr = NixIOfr(filename=self.testfilename)
self.reader_norm = NixIO(filename=self.testfilename, mode='ro')
self.blk = self.reader_fr.read_block(block_index=1, load_waveforms=True)
# read block with NixIOfr
self.blk1 = self.reader_norm.read_block(index=1) # read same block with NixIO
def tearDown(self):
self.reader_fr.file.close()
self.reader_norm.close()
def test_check_same_neo_structure(self):
self.assertEqual(len(self.blk.segments), len(self.blk1.segments))
for seg1, seg2 in zip(self.blk.segments, self.blk1.segments):
self.assertEqual(len(seg1.analogsignals), len(seg2.analogsignals))
self.assertEqual(len(seg1.spiketrains), len(seg2.spiketrains))
self.assertEqual(len(seg1.events), len(seg2.events))
self.assertEqual(len(seg1.epochs), len(seg2.epochs))
def test_check_same_data_content(self):
for seg1, seg2 in zip(self.blk.segments, self.blk1.segments):
for asig1, asig2 in zip(seg1.analogsignals, seg2.analogsignals):
                np.testing.assert_almost_equal(asig1.magnitude, asig2.magnitude)
                # almost_equal, not array_equal: the two readers agree only up
                # to floating-point precision
for st1, st2 in zip(seg1.spiketrains, seg2.spiketrains):
np.testing.assert_array_equal(st1.magnitude, st2.times)
for wf1, wf2 in zip(st1.waveforms, st2.waveforms):
                    np.testing.assert_array_equal(wf1.shape, wf2.shape)